url | repository_url | labels_url | comments_url | events_url | html_url | id | node_id | number | title | user | labels | state | locked | assignee | assignees | milestone | comments | created_at | updated_at | closed_at | author_association | sub_issues_summary | active_lock_reason | draft | pull_request | body | closed_by | reactions | timeline_url | performed_via_github_app | state_reason | is_pull_request |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
https://api.github.com/repos/ollama/ollama/issues/655 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/655/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/655/comments | https://api.github.com/repos/ollama/ollama/issues/655/events | https://github.com/ollama/ollama/issues/655 | 1,920,161,554 | I_kwDOJ0Z1Ps5yc1cS | 655 | Question: where is the ollama.ai website source? | {
"login": "jamesbraza",
"id": 8990777,
"node_id": "MDQ6VXNlcjg5OTA3Nzc=",
"avatar_url": "https://avatars.githubusercontent.com/u/8990777?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jamesbraza",
"html_url": "https://github.com/jamesbraza",
"followers_url": "https://api.github.com/users/jamesbraza/followers",
"following_url": "https://api.github.com/users/jamesbraza/following{/other_user}",
"gists_url": "https://api.github.com/users/jamesbraza/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jamesbraza/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jamesbraza/subscriptions",
"organizations_url": "https://api.github.com/users/jamesbraza/orgs",
"repos_url": "https://api.github.com/users/jamesbraza/repos",
"events_url": "https://api.github.com/users/jamesbraza/events{/privacy}",
"received_events_url": "https://api.github.com/users/jamesbraza/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 3 | 2023-09-30T07:45:04 | 2023-12-04T19:56:41 | 2023-12-04T19:56:40 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | I was going to try to make some docs PRs into [ollama.ai](https://ollama.ai/). Where is the source code for the website? | {
"login": "technovangelist",
"id": 633681,
"node_id": "MDQ6VXNlcjYzMzY4MQ==",
"avatar_url": "https://avatars.githubusercontent.com/u/633681?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/technovangelist",
"html_url": "https://github.com/technovangelist",
"followers_url": "https://api.github.com/users/technovangelist/followers",
"following_url": "https://api.github.com/users/technovangelist/following{/other_user}",
"gists_url": "https://api.github.com/users/technovangelist/gists{/gist_id}",
"starred_url": "https://api.github.com/users/technovangelist/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/technovangelist/subscriptions",
"organizations_url": "https://api.github.com/users/technovangelist/orgs",
"repos_url": "https://api.github.com/users/technovangelist/repos",
"events_url": "https://api.github.com/users/technovangelist/events{/privacy}",
"received_events_url": "https://api.github.com/users/technovangelist/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/655/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/655/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/294 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/294/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/294/comments | https://api.github.com/repos/ollama/ollama/issues/294/events | https://github.com/ollama/ollama/issues/294 | 1,838,026,789 | I_kwDOJ0Z1Ps5tjhAl | 294 | Streaming responses should have `Content-Type` set to `application/x-ndjson` | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
},
{
"id": 5667396210,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2acg",
"url": "https://api.github.com/repos/ollama/ollama/labels/good%20first%20issue",
"name": "good first issue",
"color": "7057ff",
"default": true,
"description": "Good for newcomers"
}
] | closed | false | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 4 | 2023-08-06T03:26:04 | 2024-01-27T00:21:55 | 2023-08-09T04:38:40 | MEMBER | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | Currently streaming responses return `text/plain`, but they should return `application/x-ndjson`. Later we should consider `application/json` (see #281) or `text/event-stream` for browser-based clients. | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/294/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/294/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/1100 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/1100/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/1100/comments | https://api.github.com/repos/ollama/ollama/issues/1100/events | https://github.com/ollama/ollama/issues/1100 | 1,989,515,984 | I_kwDOJ0Z1Ps52lZrQ | 1,100 | asking an LLM to process a CSV file as a data source | {
"login": "igorschlum",
"id": 2884312,
"node_id": "MDQ6VXNlcjI4ODQzMTI=",
"avatar_url": "https://avatars.githubusercontent.com/u/2884312?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/igorschlum",
"html_url": "https://github.com/igorschlum",
"followers_url": "https://api.github.com/users/igorschlum/followers",
"following_url": "https://api.github.com/users/igorschlum/following{/other_user}",
"gists_url": "https://api.github.com/users/igorschlum/gists{/gist_id}",
"starred_url": "https://api.github.com/users/igorschlum/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/igorschlum/subscriptions",
"organizations_url": "https://api.github.com/users/igorschlum/orgs",
"repos_url": "https://api.github.com/users/igorschlum/repos",
"events_url": "https://api.github.com/users/igorschlum/events{/privacy}",
"received_events_url": "https://api.github.com/users/igorschlum/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396220,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2afA",
"url": "https://api.github.com/repos/ollama/ollama/labels/question",
"name": "question",
"color": "d876e3",
"default": true,
"description": "General questions"
}
] | closed | false | null | [] | null | 2 | 2023-11-12T18:24:04 | 2024-05-06T23:26:32 | 2024-05-06T23:26:31 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | I don't know how to ask Ollama to process a CSV file. When I ask Falcon or Llama2 to do so, they give me instructions that are not functional.
I tried a prompt like this:
"$(cat /Users/igor/Documents/text.txt)" please translate this text in English.
Falcon: Yes, I can translate it for you. However, I need the text in question to be able to
translate it accurately. Please provide the text for me to translate.<|endoftext|>
(base) igor@MacStudiodeIgor-001 ~ % ollama run llama2 "$(cat /Users/igor/Documents/text.txt)" please summarize this data
Of course! I'd be happy to help you summarize a text file using natural language processing (NLP)
techniques. To get started, could you please provide me with the text file you would like me to
summarize? Additionally, please let me know what kind of summary you would like me to generate
(e.g. a short summary, a detailed summary, a summary of key points)?
>>> how can I provide you with a text file in csv to process it?
Great! You can provide me with a CSV file in several ways:
1. Upload it to the chat: You can upload your CSV file to the chat by clicking on the "Attach file"
or "Upload" button on the bottom left corner of the chat window. Select the CSV file from your
computer and I'll be able to access it.
2. Share a link: If you prefer, you can share a link to a cloud storage service like Google Drive,
Dropbox, or OneDrive where the CSV file is located. Just provide me with the link and I'll be able
to access the file.
3. Provide a download link: If you have the CSV file hosted on a website or server, you can provide
me with a link to download it directly.
4. Manually enter data: If you don't have a CSV file available, you can manually enter the data
into the chat window using the following format:
```csv
column1,column2,...
value1,value2,...
```
For example, if your CSV file has two columns named "Name" and "Age", you can enter the data like
this:
```css
Name,Age
John,25
Mary,30
Bob,35
...
```
Please let me know which method you prefer, and I'll be happy to help you process the CSV file!
>>> how I can ask you to translate a google doc file. The URL of the file is "https://docs.google.com/doc
... ument/d/1xgKxH1ZgIQJ5M_HZ_AqGudzN8cUSwGyeF-dEIoXtEng/edit?usp=sharing" ?
I'm just an AI and do not have the ability to access external links or documents. Therefore, I am
unable to translate the contents of the shared document directly.
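For reference, the file-in-prompt approach shown at the top of this report can also be done programmatically against Ollama's `/api/generate` endpoint. A minimal sketch in Go, assuming a default server on localhost:11434; the file path, model, and prompt are illustrative, not the author's setup:
```go
// Minimal sketch (not from the original report): read a local file and
// send its contents to a running Ollama server inside a single prompt.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	data, err := os.ReadFile("data.csv") // hypothetical input file
	if err != nil {
		panic(err)
	}
	payload, err := json.Marshal(map[string]any{
		"model":  "llama2",
		"prompt": fmt.Sprintf("Please summarize this data:\n%s", data),
		"stream": false, // ask for one JSON object instead of a token stream
	})
	if err != nil {
		panic(err)
	}
	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // raw JSON; the generated text is in the "response" field
}
```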
| {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/1100/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/1100/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/753 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/753/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/753/comments | https://api.github.com/repos/ollama/ollama/issues/753/events | https://github.com/ollama/ollama/pull/753 | 1,936,541,042 | PR_kwDOJ0Z1Ps5ccs1t | 753 | rename the examples to be more descriptive | {
"login": "technovangelist",
"id": 633681,
"node_id": "MDQ6VXNlcjYzMzY4MQ==",
"avatar_url": "https://avatars.githubusercontent.com/u/633681?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/technovangelist",
"html_url": "https://github.com/technovangelist",
"followers_url": "https://api.github.com/users/technovangelist/followers",
"following_url": "https://api.github.com/users/technovangelist/following{/other_user}",
"gists_url": "https://api.github.com/users/technovangelist/gists{/gist_id}",
"starred_url": "https://api.github.com/users/technovangelist/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/technovangelist/subscriptions",
"organizations_url": "https://api.github.com/users/technovangelist/orgs",
"repos_url": "https://api.github.com/users/technovangelist/repos",
"events_url": "https://api.github.com/users/technovangelist/events{/privacy}",
"received_events_url": "https://api.github.com/users/technovangelist/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2023-10-11T00:40:54 | 2023-10-12T18:24:13 | 2023-10-12T18:24:12 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/753",
"html_url": "https://github.com/ollama/ollama/pull/753",
"diff_url": "https://github.com/ollama/ollama/pull/753.diff",
"patch_url": "https://github.com/ollama/ollama/pull/753.patch",
"merged_at": "2023-10-12T18:24:12"
} | Also adds a few READMEs. | {
"login": "technovangelist",
"id": 633681,
"node_id": "MDQ6VXNlcjYzMzY4MQ==",
"avatar_url": "https://avatars.githubusercontent.com/u/633681?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/technovangelist",
"html_url": "https://github.com/technovangelist",
"followers_url": "https://api.github.com/users/technovangelist/followers",
"following_url": "https://api.github.com/users/technovangelist/following{/other_user}",
"gists_url": "https://api.github.com/users/technovangelist/gists{/gist_id}",
"starred_url": "https://api.github.com/users/technovangelist/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/technovangelist/subscriptions",
"organizations_url": "https://api.github.com/users/technovangelist/orgs",
"repos_url": "https://api.github.com/users/technovangelist/repos",
"events_url": "https://api.github.com/users/technovangelist/events{/privacy}",
"received_events_url": "https://api.github.com/users/technovangelist/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/753/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/753/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/5469 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5469/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5469/comments | https://api.github.com/repos/ollama/ollama/issues/5469/events | https://github.com/ollama/ollama/pull/5469 | 2,389,580,481 | PR_kwDOJ0Z1Ps50YM8_ | 5,469 | Prevent loading models larger than total memory | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 9 | 2024-07-03T22:16:39 | 2024-08-06T20:42:03 | 2024-07-05T15:22:20 | COLLABORATOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/5469",
"html_url": "https://github.com/ollama/ollama/pull/5469",
"diff_url": "https://github.com/ollama/ollama/pull/5469.diff",
"patch_url": "https://github.com/ollama/ollama/pull/5469.patch",
"merged_at": "2024-07-05T15:22:20"
} | Users may not realize that the shiny new model they're trying to load fits on their disk but won't fit into system+GPU memory. Today we crash; with this fix, we return a clear error message before even trying to load it.
Fixes #3837 #4955
Verified by using `stress-ng` to saturate system memory and loading a secondary model on another ollama instance to use up GPU memory, then trying to load a model:
```
% ollama run gemma:7b
Error: requested model (5.5 GiB) is too large for this system (4.5 GiB)
```
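For readers following along, here is a self-contained sketch of the guard described above. It is illustrative only, not the actual ollama code, and the byte counts are made up to mirror the example error:
```go
// Illustrative sketch of the pre-load guard this PR describes: refuse to
// schedule a model whose estimated footprint exceeds available system + GPU
// memory, instead of crashing mid-load.
package main

import "fmt"

// humanGiB renders a byte count the way the error message above does.
func humanGiB(b uint64) string {
	return fmt.Sprintf("%.1f GiB", float64(b)/(1<<30))
}

// checkFits fails fast when the model cannot possibly fit.
func checkFits(required, systemFree, gpuFree uint64) error {
	available := systemFree + gpuFree
	if required > available {
		return fmt.Errorf("requested model (%s) is too large for this system (%s)",
			humanGiB(required), humanGiB(available))
	}
	return nil
}

func main() {
	// Hypothetical values: a 5.5 GiB model vs. ~4 GiB free RAM + ~0.5 GiB free VRAM.
	err := checkFits(5905580032, 4256301056, 575537152)
	fmt.Println(err) // requested model (5.5 GiB) is too large for this system (4.5 GiB)
}
```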
Debug server logs from the manual test
```
time=2024-07-03T15:12:11.901-07:00 level=DEBUG source=gpu.go:336 msg="updating system memory data" before.total="31.3 GiB" before.free="4.0 GiB" now.total="31.3 GiB" now.free="3.9 GiB"
CUDA driver version: 11.4
time=2024-07-03T15:12:12.000-07:00 level=DEBUG source=gpu.go:377 msg="updating cuda memory data" gpu=GPU-1c750365-54dc-7082-7c6b-9dd953a68ab6 name="NVIDIA GeForce GTX 1060 6GB" before.total="5.9 GiB" before.free="548.9 MiB" now.total="5.9 GiB" now.free="548.9 MiB" now.used="5.4 GiB"
releasing cuda driver library
time=2024-07-03T15:12:12.000-07:00 level=DEBUG source=sched.go:186 msg="updating default concurrency" OLLAMA_MAX_LOADED_MODELS=3 gpu_count=1
time=2024-07-03T15:12:12.033-07:00 level=DEBUG source=memory.go:101 msg=evaluating library=cuda gpu_count=1 available="[548.9 MiB]"
time=2024-07-03T15:12:12.034-07:00 level=DEBUG source=memory.go:168 msg="gpu has too little memory to allocate any layers" gpu="{memInfo:{TotalMemory:6372196352 FreeMemory:575537152} Library:cuda Variant:no vector extensions MinimumMemory:479199232 DependencyPath: EnvWorkarounds:[] UnreliableFreeMemory:false ID:GPU-1c750365-54dc-7082-7c6b-9dd953a68ab6 Name:NVIDIA GeForce GTX 1060 6GB Compute:6.1 DriverMajor:11 DriverMinor:4}"
time=2024-07-03T15:12:12.034-07:00 level=DEBUG source=memory.go:296 msg="insufficient VRAM to load any model layers"
time=2024-07-03T15:12:12.034-07:00 level=WARN source=sched.go:216 msg="model request too large for system" requested="5.5 GiB" system="4.5 GiB"
[GIN] 2024/07/03 - 15:12:12 | 500 | 200.768652ms | 10.16.0.83 | POST "/api/chat"
```
The system under test:
```
% free -h; nvidia-smi
total used free shared buff/cache available
Mem: 31Gi 26Gi 394Mi 197Mi 4.2Gi 3.9Gi
Swap: 0B 0B 0B
Wed Jul 3 15:15:17 2024
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 470.256.02 Driver Version: 470.256.02 CUDA Version: 11.4 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 NVIDIA GeForce ... Off | 00000000:01:00.0 Off | N/A |
| 33% 33C P8 13W / 120W | 5467MiB / 6077MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| 0 N/A N/A 1582218 C ...a_v11/ollama_llama_server 5465MiB |
+-----------------------------------------------------------------------------+
``` | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5469/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/5469/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/6968 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/6968/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/6968/comments | https://api.github.com/repos/ollama/ollama/issues/6968/events | https://github.com/ollama/ollama/issues/6968 | 2,549,149,437 | I_kwDOJ0Z1Ps6X8O79 | 6,968 | Adjust templates for FIM models to acknowledge existence of suffix | {
"login": "sestinj",
"id": 33237525,
"node_id": "MDQ6VXNlcjMzMjM3NTI1",
"avatar_url": "https://avatars.githubusercontent.com/u/33237525?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/sestinj",
"html_url": "https://github.com/sestinj",
"followers_url": "https://api.github.com/users/sestinj/followers",
"following_url": "https://api.github.com/users/sestinj/following{/other_user}",
"gists_url": "https://api.github.com/users/sestinj/gists{/gist_id}",
"starred_url": "https://api.github.com/users/sestinj/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/sestinj/subscriptions",
"organizations_url": "https://api.github.com/users/sestinj/orgs",
"repos_url": "https://api.github.com/users/sestinj/repos",
"events_url": "https://api.github.com/users/sestinj/events{/privacy}",
"received_events_url": "https://api.github.com/users/sestinj/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | open | false | null | [] | null | 0 | 2024-09-25T23:12:18 | 2024-09-25T23:12:18 | null | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
CodeGemma, for example (it's not the only one), supports both FIM and chat. Ollama uses the FIM template for codegemma:2b and the chat template for codegemma:7b. This feels like the right default, but when a suffix is provided it can be confidently assumed that FIM format is desired.
The solution would be a prompt template wrapped in an "if suffix" check, something like this:
```
{{- if .Suffix }}<|fim_prefix|>{{ .Prompt }}<|fim_suffix|>{{ .Suffix }}<|fim_middle|>
{{- else }}<start_of_turn>user
{{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}<end_of_turn>
<start_of_turn>model
{{ .Response }}<end_of_turn>
{{- end }}
```
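To sanity-check the branching, the proposed template can be fed through Go's text/template, the engine Ollama templates use. A minimal sketch with made-up prompt values:
```go
// Sketch: render the proposed CodeGemma template both with and without a
// suffix to confirm it picks the FIM form vs. the chat form.
package main

import (
	"os"
	"text/template"
)

const tmpl = `{{- if .Suffix }}<|fim_prefix|>{{ .Prompt }}<|fim_suffix|>{{ .Suffix }}<|fim_middle|>
{{- else }}<start_of_turn>user
{{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}<end_of_turn>
<start_of_turn>model
{{ .Response }}<end_of_turn>
{{- end }}
`

type vars struct{ System, Prompt, Suffix, Response string }

func main() {
	t := template.Must(template.New("codegemma").Parse(tmpl))
	// With a suffix: renders the FIM form.
	t.Execute(os.Stdout, vars{Prompt: "func add(a, b int) int {", Suffix: "}"})
	// Without a suffix: renders the chat form.
	t.Execute(os.Stdout, vars{Prompt: "Explain this function."})
}
```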
One caveat I can see is that it would make the prompt template slightly less readable for users browsing the Ollama template library.
### OS
macOS
### GPU
Apple
### CPU
Apple
### Ollama version
0.3.11 | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/6968/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/6968/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/4289 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/4289/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/4289/comments | https://api.github.com/repos/ollama/ollama/issues/4289/events | https://github.com/ollama/ollama/pull/4289 | 2,287,981,952 | PR_kwDOJ0Z1Ps5vAaP5 | 4,289 | Doc container usage and workaround for nvidia errors | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2024-05-09T15:52:08 | 2024-05-09T16:27:32 | 2024-05-09T16:27:30 | COLLABORATOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/4289",
"html_url": "https://github.com/ollama/ollama/pull/4289",
"diff_url": "https://github.com/ollama/ollama/pull/4289.diff",
"patch_url": "https://github.com/ollama/ollama/pull/4289.patch",
"merged_at": "2024-05-09T16:27:30"
} | Fixes #4242 | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/4289/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/4289/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/1939 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/1939/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/1939/comments | https://api.github.com/repos/ollama/ollama/issues/1939/events | https://github.com/ollama/ollama/issues/1939 | 2,077,812,700 | I_kwDOJ0Z1Ps572Ofc | 1,939 | Unable to load dynamic library error when using container | {
"login": "otavio-silva",
"id": 22914610,
"node_id": "MDQ6VXNlcjIyOTE0NjEw",
"avatar_url": "https://avatars.githubusercontent.com/u/22914610?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/otavio-silva",
"html_url": "https://github.com/otavio-silva",
"followers_url": "https://api.github.com/users/otavio-silva/followers",
"following_url": "https://api.github.com/users/otavio-silva/following{/other_user}",
"gists_url": "https://api.github.com/users/otavio-silva/gists{/gist_id}",
"starred_url": "https://api.github.com/users/otavio-silva/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/otavio-silva/subscriptions",
"organizations_url": "https://api.github.com/users/otavio-silva/orgs",
"repos_url": "https://api.github.com/users/otavio-silva/repos",
"events_url": "https://api.github.com/users/otavio-silva/events{/privacy}",
"received_events_url": "https://api.github.com/users/otavio-silva/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 26 | 2024-01-12T00:17:28 | 2024-01-19T21:41:09 | 2024-01-19T21:41:09 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | # Description
When trying to run a model using the container, it gives an error about loading a dynamic library. Ollama is able to list the available models but not run them. The container can see the GPU, as `nvidia-smi` gives the expected output.
# Current output
```log
Error: Unable to load dynamic library: Unable to load dynamic server library: /tmp/ollama946395612/cpu_avx2/libext_server.so: undefined symbol: _ZTVN10__cxxabiv117__c
```
# Expected output
The model runs correctly.
# Steps to reproduce
1. Run the command `podman run --device nvidia.com/gpu=all --security-opt label=disable --detach --volume .ollama:/root/.ollama -p 11434:11434 --name ollama-20 ollama/ollama:0.1.20`
2. Run the command `podman exec -it ollama-20 ollama run llama2`
3. See error
# System info
```log
Host Name: GE76RAIDER
OS Name: Microsoft Windows 11 Pro
OS Version: 10.0.22631 N/A Build 22631
OS Manufacturer: Microsoft Corporation
OS Configuration: Standalone Workstation
OS Build Type: Multiprocessor Free
Registered Owner: [email protected]
Registered Organization: N/A
Product ID: 00330-80000-00000-AA520
Original Install Date: 02/08/2023, 14:30:14
System Boot Time: 10/01/2024, 12:32:44
System Manufacturer: Micro-Star International Co., Ltd.
System Model: Raider GE76 12UHS
System Type: x64-based PC
Processor(s): 1 Processor(s) Installed.
[01]: Intel64 Family 6 Model 154 Stepping 3 GenuineIntel ~2900 Mhz
BIOS Version: American Megatrends International, LLC. E17K4IMS.20D, 26/06/2023
Windows Directory: C:\WINDOWS
System Directory: C:\WINDOWS\system32
Boot Device: \Device\HarddiskVolume1
System Locale: pt-br;Portuguese (Brazil)
Input Locale: en-us;English (United States)
Time Zone: (UTC-03:00) Brasilia
Total Physical Memory: 65,237 MB
Available Physical Memory: 44,469 MB
Virtual Memory: Max Size: 74,965 MB
Virtual Memory: Available: 47,017 MB
Virtual Memory: In Use: 27,948 MB
Page File Location(s): C:\pagefile.sys
Domain: WORKGROUP
Logon Server: \\GE76RAIDER
Hotfix(s): 4 Hotfix(s) Installed.
[01]: KB5033920
[02]: KB5027397
[03]: KB5034123
[04]: KB5032393
Network Card(s): 3 NIC(s) Installed.
[01]: Killer E3100G 2.5 Gigabit Ethernet Controller
Connection Name: Ethernet
Status: Media disconnected
[02]: Killer(R) Wi-Fi 6E AX1675i 160MHz Wireless Network Adapter (211NGW)
Connection Name: Wi-Fi
DHCP Enabled: Yes
DHCP Server: 192.168.1.1
IP address(es)
[01]: 192.168.1.26
[03]: TAP-Windows Adapter V9
Connection Name: TAP-Windows
Status: Media disconnected
Hyper-V Requirements: A hypervisor has been detected. Features required for Hyper-V will not be displayed.
```
| {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/1939/reactions",
"total_count": 2,
"+1": 2,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/1939/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/3021 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/3021/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/3021/comments | https://api.github.com/repos/ollama/ollama/issues/3021/events | https://github.com/ollama/ollama/issues/3021 | 2,177,134,712 | I_kwDOJ0Z1Ps6BxHB4 | 3,021 | API endpoint for encoding and decoding tokens | {
"login": "Hansson0728",
"id": 9604420,
"node_id": "MDQ6VXNlcjk2MDQ0MjA=",
"avatar_url": "https://avatars.githubusercontent.com/u/9604420?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Hansson0728",
"html_url": "https://github.com/Hansson0728",
"followers_url": "https://api.github.com/users/Hansson0728/followers",
"following_url": "https://api.github.com/users/Hansson0728/following{/other_user}",
"gists_url": "https://api.github.com/users/Hansson0728/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Hansson0728/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Hansson0728/subscriptions",
"organizations_url": "https://api.github.com/users/Hansson0728/orgs",
"repos_url": "https://api.github.com/users/Hansson0728/repos",
"events_url": "https://api.github.com/users/Hansson0728/events{/privacy}",
"received_events_url": "https://api.github.com/users/Hansson0728/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396200,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request",
"name": "feature request",
"color": "a2eeef",
"default": false,
"description": "New feature or request"
}
] | closed | false | null | [] | null | 3 | 2024-03-09T08:26:05 | 2024-09-04T04:34:44 | 2024-09-04T04:34:44 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | Please please someone who knows go... add so the internal llamacpp encode endpoint is avilable to the ollama api, so we can use the llm tokenizer to measure how much context we are using accuratly, so we can pick and choose in our memory instead of only trimming from the beginning of our messages, please please | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/3021/reactions",
"total_count": 3,
"+1": 3,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/3021/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/3333 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/3333/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/3333/comments | https://api.github.com/repos/ollama/ollama/issues/3333/events | https://github.com/ollama/ollama/pull/3333 | 2,204,828,951 | PR_kwDOJ0Z1Ps5qm8bd | 3,333 | doc: specify ADAPTER is optional | {
"login": "bmizerany",
"id": 46,
"node_id": "MDQ6VXNlcjQ2",
"avatar_url": "https://avatars.githubusercontent.com/u/46?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/bmizerany",
"html_url": "https://github.com/bmizerany",
"followers_url": "https://api.github.com/users/bmizerany/followers",
"following_url": "https://api.github.com/users/bmizerany/following{/other_user}",
"gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}",
"starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions",
"organizations_url": "https://api.github.com/users/bmizerany/orgs",
"repos_url": "https://api.github.com/users/bmizerany/repos",
"events_url": "https://api.github.com/users/bmizerany/events{/privacy}",
"received_events_url": "https://api.github.com/users/bmizerany/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2024-03-25T03:53:16 | 2024-03-25T16:43:19 | 2024-03-25T16:43:19 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/3333",
"html_url": "https://github.com/ollama/ollama/pull/3333",
"diff_url": "https://github.com/ollama/ollama/pull/3333.diff",
"patch_url": "https://github.com/ollama/ollama/pull/3333.patch",
"merged_at": "2024-03-25T16:43:19"
} | null | {
"login": "bmizerany",
"id": 46,
"node_id": "MDQ6VXNlcjQ2",
"avatar_url": "https://avatars.githubusercontent.com/u/46?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/bmizerany",
"html_url": "https://github.com/bmizerany",
"followers_url": "https://api.github.com/users/bmizerany/followers",
"following_url": "https://api.github.com/users/bmizerany/following{/other_user}",
"gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}",
"starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions",
"organizations_url": "https://api.github.com/users/bmizerany/orgs",
"repos_url": "https://api.github.com/users/bmizerany/repos",
"events_url": "https://api.github.com/users/bmizerany/events{/privacy}",
"received_events_url": "https://api.github.com/users/bmizerany/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/3333/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/3333/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/5248 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5248/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5248/comments | https://api.github.com/repos/ollama/ollama/issues/5248/events | https://github.com/ollama/ollama/pull/5248 | 2,369,211,125 | PR_kwDOJ0Z1Ps5zT90G | 5,248 | cmd: defer stating model info until necessary | {
"login": "bmizerany",
"id": 46,
"node_id": "MDQ6VXNlcjQ2",
"avatar_url": "https://avatars.githubusercontent.com/u/46?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/bmizerany",
"html_url": "https://github.com/bmizerany",
"followers_url": "https://api.github.com/users/bmizerany/followers",
"following_url": "https://api.github.com/users/bmizerany/following{/other_user}",
"gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}",
"starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions",
"organizations_url": "https://api.github.com/users/bmizerany/orgs",
"repos_url": "https://api.github.com/users/bmizerany/repos",
"events_url": "https://api.github.com/users/bmizerany/events{/privacy}",
"received_events_url": "https://api.github.com/users/bmizerany/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2024-06-24T05:00:57 | 2024-06-25T03:14:04 | 2024-06-25T03:14:03 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/5248",
"html_url": "https://github.com/ollama/ollama/pull/5248",
"diff_url": "https://github.com/ollama/ollama/pull/5248.diff",
"patch_url": "https://github.com/ollama/ollama/pull/5248.patch",
"merged_at": "2024-06-25T03:14:03"
} | This commit changes the 'ollama run' command to defer fetching model information until it actually needs it, i.e. when running in interactive mode.
This positively impacts the performance of the command:
```
; time ./before run llama3 'hi'
Hi! It's nice to meet you. Is there something I can help you with, or would you like to chat?
./before run llama3 'hi' 0.02s user 0.01s system 2% cpu 1.168 total
; time ./before run llama3 'hi'
Hi! It's nice to meet you. Is there something I can help you with, or would you like to chat?
./before run llama3 'hi' 0.02s user 0.01s system 2% cpu 1.220 total
; time ./before run llama3 'hi'
Hi! It's nice to meet you. Is there something I can help you with, or would you like to chat?
./before run llama3 'hi' 0.02s user 0.01s system 2% cpu 1.217 total
; time ./after run llama3 'hi'
Hi! It's nice to meet you. Is there something I can help you with, or would you like to chat?
./after run llama3 'hi' 0.02s user 0.01s system 4% cpu 0.652 total
; time ./after run llama3 'hi'
Hi! It's nice to meet you. Is there something I can help you with, or would you like to chat?
./after run llama3 'hi' 0.01s user 0.01s system 5% cpu 0.498 total
; time ./after run llama3 'hi'
Hi! It's nice to meet you. Is there something I can help you with or would you like to chat?
./after run llama3 'hi' 0.01s user 0.01s system 3% cpu 0.479 total
; time ./after run llama3 'hi'
Hi! It's nice to meet you. Is there something I can help you with, or would you like to chat?
./after run llama3 'hi' 0.02s user 0.01s system 5% cpu 0.507 total
; time ./after run llama3 'hi'
Hi! It's nice to meet you. Is there something I can help you with, or would you like to chat?
./after run llama3 'hi' 0.02s user 0.01s system 5% cpu 0.507 total
``` | {
"login": "bmizerany",
"id": 46,
"node_id": "MDQ6VXNlcjQ2",
"avatar_url": "https://avatars.githubusercontent.com/u/46?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/bmizerany",
"html_url": "https://github.com/bmizerany",
"followers_url": "https://api.github.com/users/bmizerany/followers",
"following_url": "https://api.github.com/users/bmizerany/following{/other_user}",
"gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}",
"starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions",
"organizations_url": "https://api.github.com/users/bmizerany/orgs",
"repos_url": "https://api.github.com/users/bmizerany/repos",
"events_url": "https://api.github.com/users/bmizerany/events{/privacy}",
"received_events_url": "https://api.github.com/users/bmizerany/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5248/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/5248/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/4556 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/4556/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/4556/comments | https://api.github.com/repos/ollama/ollama/issues/4556/events | https://github.com/ollama/ollama/issues/4556 | 2,308,080,352 | I_kwDOJ0Z1Ps6JkoLg | 4,556 | Plugins | {
"login": "zorgoz",
"id": 1569170,
"node_id": "MDQ6VXNlcjE1NjkxNzA=",
"avatar_url": "https://avatars.githubusercontent.com/u/1569170?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/zorgoz",
"html_url": "https://github.com/zorgoz",
"followers_url": "https://api.github.com/users/zorgoz/followers",
"following_url": "https://api.github.com/users/zorgoz/following{/other_user}",
"gists_url": "https://api.github.com/users/zorgoz/gists{/gist_id}",
"starred_url": "https://api.github.com/users/zorgoz/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/zorgoz/subscriptions",
"organizations_url": "https://api.github.com/users/zorgoz/orgs",
"repos_url": "https://api.github.com/users/zorgoz/repos",
"events_url": "https://api.github.com/users/zorgoz/events{/privacy}",
"received_events_url": "https://api.github.com/users/zorgoz/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396200,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request",
"name": "feature request",
"color": "a2eeef",
"default": false,
"description": "New feature or request"
}
] | open | false | null | [] | null | 1 | 2024-05-21T11:33:56 | 2024-06-07T16:47:51 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | Hello, are the "tools" and "tool_choice" API parameters supported, and if not, is there any roadmap for them? | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/4556/reactions",
"total_count": 4,
"+1": 4,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/4556/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/4731 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/4731/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/4731/comments | https://api.github.com/repos/ollama/ollama/issues/4731/events | https://github.com/ollama/ollama/pull/4731 | 2,326,581,769 | PR_kwDOJ0Z1Ps5xD2Y4 | 4,731 | Update llama.cpp submodule to `5921b8f0` | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2024-05-30T21:40:26 | 2024-05-30T23:20:23 | 2024-05-30T23:20:22 | MEMBER | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/4731",
"html_url": "https://github.com/ollama/ollama/pull/4731",
"diff_url": "https://github.com/ollama/ollama/pull/4731.diff",
"patch_url": "https://github.com/ollama/ollama/pull/4731.patch",
"merged_at": "2024-05-30T23:20:22"
} | null | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/4731/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/4731/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/1785 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/1785/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/1785/comments | https://api.github.com/repos/ollama/ollama/issues/1785/events | https://github.com/ollama/ollama/pull/1785 | 2,066,007,927 | PR_kwDOJ0Z1Ps5jPgql | 1,785 | Load dynamic cpu lib on windows | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2024-01-04T16:51:13 | 2024-01-04T16:55:18 | 2024-01-04T16:55:02 | COLLABORATOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/1785",
"html_url": "https://github.com/ollama/ollama/pull/1785",
"diff_url": "https://github.com/ollama/ollama/pull/1785.diff",
"patch_url": "https://github.com/ollama/ollama/pull/1785.patch",
"merged_at": "2024-01-04T16:55:02"
} | On Linux, we link the CPU library into the Go app and fall back to it when no GPU match is found. On Windows, we do not link in the CPU library so that we can better control our dependencies for the CLI. This fixes the logic so we correctly fall back to the dynamic CPU library on Windows. | {
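For illustration, the fallback being fixed is the usual try-accelerator-then-CPU loop; a minimal sketch of the shape (library names here are placeholders, not Ollama's actual artifacts):

```python
import ctypes

def load_runtime(candidates=("ext_server_cuda.dll", "ext_server_cpu.dll")):
    # Try accelerator-specific builds first; fall back to the CPU build
    # when no GPU match is found or the load fails.
    for name in candidates:
        try:
            return ctypes.CDLL(name)
        except OSError:
            continue
    raise RuntimeError("no usable runtime library found")
```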
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/1785/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/1785/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/5013 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5013/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5013/comments | https://api.github.com/repos/ollama/ollama/issues/5013/events | https://github.com/ollama/ollama/issues/5013 | 2,350,097,352 | I_kwDOJ0Z1Ps6ME6PI | 5,013 | How to prevent the model from automatically releasing after 5 minutes when requesting an OpenAI package? | {
"login": "GoEnthusiast",
"id": 132556615,
"node_id": "U_kgDOB-anRw",
"avatar_url": "https://avatars.githubusercontent.com/u/132556615?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/GoEnthusiast",
"html_url": "https://github.com/GoEnthusiast",
"followers_url": "https://api.github.com/users/GoEnthusiast/followers",
"following_url": "https://api.github.com/users/GoEnthusiast/following{/other_user}",
"gists_url": "https://api.github.com/users/GoEnthusiast/gists{/gist_id}",
"starred_url": "https://api.github.com/users/GoEnthusiast/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/GoEnthusiast/subscriptions",
"organizations_url": "https://api.github.com/users/GoEnthusiast/orgs",
"repos_url": "https://api.github.com/users/GoEnthusiast/repos",
"events_url": "https://api.github.com/users/GoEnthusiast/events{/privacy}",
"received_events_url": "https://api.github.com/users/GoEnthusiast/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5789807732,
"node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA",
"url": "https://api.github.com/repos/ollama/ollama/labels/model%20request",
"name": "model request",
"color": "1E5DE6",
"default": false,
"description": "Model requests"
}
] | closed | false | null | [] | null | 3 | 2024-06-13T04:06:14 | 2024-07-09T16:26:00 | 2024-07-09T16:25:59 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ```python
from openai import OpenAI

client = OpenAI(
    base_url='http://localhost:11434/v1/',
    # required but ignored
    api_key='ollama',
)

chat_completion = client.chat.completions.create(
    messages=[
        {
            'role': 'user',
            'content': 'Say this is a test',
        }
    ],
    model='llama3',
)
```
In this code, how should I set the "keep_alive": -1 request parameter so the model is not released after 5 minutes without a request? | {
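One approach that may work without leaving the OpenAI client is forwarding the native parameter through `extra_body`; a sketch, assuming the compatibility endpoint passes `keep_alive` through (if it does not, the documented server-side control is the `OLLAMA_KEEP_ALIVE` environment variable):

```python
from openai import OpenAI

client = OpenAI(base_url='http://localhost:11434/v1/', api_key='ollama')

chat_completion = client.chat.completions.create(
    messages=[{'role': 'user', 'content': 'Say this is a test'}],
    model='llama3',
    # extra_body fields are merged into the JSON request body as-is;
    # whether the server honors keep_alive here is an assumption.
    extra_body={'keep_alive': -1},
)
```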
"login": "pdevine",
"id": 75239,
"node_id": "MDQ6VXNlcjc1MjM5",
"avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/pdevine",
"html_url": "https://github.com/pdevine",
"followers_url": "https://api.github.com/users/pdevine/followers",
"following_url": "https://api.github.com/users/pdevine/following{/other_user}",
"gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}",
"starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/pdevine/subscriptions",
"organizations_url": "https://api.github.com/users/pdevine/orgs",
"repos_url": "https://api.github.com/users/pdevine/repos",
"events_url": "https://api.github.com/users/pdevine/events{/privacy}",
"received_events_url": "https://api.github.com/users/pdevine/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5013/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/5013/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/350 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/350/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/350/comments | https://api.github.com/repos/ollama/ollama/issues/350/events | https://github.com/ollama/ollama/pull/350 | 1,850,684,186 | PR_kwDOJ0Z1Ps5X7j7T | 350 | update llama.cpp | {
"login": "mxyng",
"id": 2372640,
"node_id": "MDQ6VXNlcjIzNzI2NDA=",
"avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mxyng",
"html_url": "https://github.com/mxyng",
"followers_url": "https://api.github.com/users/mxyng/followers",
"following_url": "https://api.github.com/users/mxyng/following{/other_user}",
"gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mxyng/subscriptions",
"organizations_url": "https://api.github.com/users/mxyng/orgs",
"repos_url": "https://api.github.com/users/mxyng/repos",
"events_url": "https://api.github.com/users/mxyng/events{/privacy}",
"received_events_url": "https://api.github.com/users/mxyng/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2023-08-14T23:09:45 | 2023-08-14T23:15:52 | 2023-08-14T23:15:52 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/350",
"html_url": "https://github.com/ollama/ollama/pull/350",
"diff_url": "https://github.com/ollama/ollama/pull/350.diff",
"patch_url": "https://github.com/ollama/ollama/pull/350.patch",
"merged_at": "2023-08-14T23:15:52"
} | null | {
"login": "mxyng",
"id": 2372640,
"node_id": "MDQ6VXNlcjIzNzI2NDA=",
"avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mxyng",
"html_url": "https://github.com/mxyng",
"followers_url": "https://api.github.com/users/mxyng/followers",
"following_url": "https://api.github.com/users/mxyng/following{/other_user}",
"gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mxyng/subscriptions",
"organizations_url": "https://api.github.com/users/mxyng/orgs",
"repos_url": "https://api.github.com/users/mxyng/repos",
"events_url": "https://api.github.com/users/mxyng/events{/privacy}",
"received_events_url": "https://api.github.com/users/mxyng/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/350/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/350/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/6760 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/6760/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/6760/comments | https://api.github.com/repos/ollama/ollama/issues/6760/events | https://github.com/ollama/ollama/pull/6760 | 2,520,525,018 | PR_kwDOJ0Z1Ps57NEi5 | 6,760 | IBM granite/granitemoe architecture support | {
"login": "gabe-l-hart",
"id": 1254484,
"node_id": "MDQ6VXNlcjEyNTQ0ODQ=",
"avatar_url": "https://avatars.githubusercontent.com/u/1254484?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/gabe-l-hart",
"html_url": "https://github.com/gabe-l-hart",
"followers_url": "https://api.github.com/users/gabe-l-hart/followers",
"following_url": "https://api.github.com/users/gabe-l-hart/following{/other_user}",
"gists_url": "https://api.github.com/users/gabe-l-hart/gists{/gist_id}",
"starred_url": "https://api.github.com/users/gabe-l-hart/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/gabe-l-hart/subscriptions",
"organizations_url": "https://api.github.com/users/gabe-l-hart/orgs",
"repos_url": "https://api.github.com/users/gabe-l-hart/repos",
"events_url": "https://api.github.com/users/gabe-l-hart/events{/privacy}",
"received_events_url": "https://api.github.com/users/gabe-l-hart/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 11 | 2024-09-11T18:59:32 | 2024-10-21T04:39:35 | 2024-10-17T18:59:52 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/6760",
"html_url": "https://github.com/ollama/ollama/pull/6760",
"diff_url": "https://github.com/ollama/ollama/pull/6760.diff",
"patch_url": "https://github.com/ollama/ollama/pull/6760.patch",
"merged_at": "2024-10-17T18:59:52"
} | ## Special Note
Since this PR bumps `llama.cpp` past the tip of `master` (`6026da52` as of writing this), it includes the recent changes to overhaul `sampling` and logging. I updated `server.cpp` so that it compiles and can run the models successfully. I also updated all of the patches to apply to the updated `llama.cpp` codebase.
## Dependencies
~~UPDATE: This PR no longer has dependencies. The first `llama.cpp` PR has been merged to support `granite`, and given our hope to release soon, we'd like to get this merged without `granitemoe` support and add that in a follow-up PR.~~
UPDATE 2: Both `granite` and `granitemoe` are now supported in `llama.cpp`. I've rebased the PR to include them (and to pick up support for `chameleon`).
~~This PR is dependent on two PRs in `llama.cpp`:~~
* [x] Support for `granite`: https://github.com/ggerganov/llama.cpp/pull/9412
* [x] Support for `granitemoe`: https://github.com/ggerganov/llama.cpp/pull/9438
~~Currently, the branch will not build since the submodule points to a commit on my fork and I have not changed the remote url. Once the `llama.cpp` PRs are merged, I will update the submodule pointer to the mainline.~~
## Description
This PR adds support for IBM's `granite` architecture. See the `llama.cpp` PRs for full details on the added architectures.
## Testing
In order to test this while it's in draft, I did the following:
```sh
# Download the IBM research experimental models (need huggingface-cli in python)
huggingface-cli download ibm/PowerLM-3b --local-dir $HOME/models/powerlm-3b
huggingface-cli download ibm/PowerMoE-3b --local-dir $HOME/models/powermoe-3b
# Convert to GGUF using the latest version of llama.cpp (I'm doing it here in the submodule)
cd llm/llama.cpp
pip install -r requirements/requirements-convert_hf_to_gguf.txt
python convert_hf_to_gguf.py $HOME/models/powerlm-3b
python convert_hf_to_gguf.py $HOME/models/powermoe-3b
cd -
# Build the llama-quantize binary in the submodule
cd llm/build/darwin/arm64_static/
make llama-quantize -j
cd -
# Quantize with the locally built llama-quantize (the input is the converted
# GGUF file; the exact filename the convert script produced may differ)
./llm/build/darwin/arm64_static/bin/llama-quantize $HOME/models/powerlm-3b/ggml-model-f16.gguf Q4_K_M
./llm/build/darwin/arm64_static/bin/llama-quantize $HOME/models/powermoe-3b/ggml-model-f16.gguf Q4_K_M
# Import to ollama (finally!)
echo "FROM $HOME/models/powerlm-3b/ggml-model-Q4_K_M.gguf" > Modelfile.powerlm-3b
./ollama create -f Modelfile.powerlm-3b powerlm:3b
echo "FROM $HOME/models/powermoe-3b/ggml-model-Q4_K_M.gguf" > Modelfile.powermoe-3b
./ollama create -f Modelfile.powermoe-3b powermoe:3b
```
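As a quick smoke test after the `ollama create` steps above, something like this (a sketch; assumes the default local server and the two tags created above) confirms the imported weights actually generate:

```python
import requests

for tag in ("powerlm:3b", "powermoe:3b"):
    # Non-streaming generate returns a single JSON object with a "response" field.
    r = requests.post(
        "http://localhost:11434/api/generate",
        json={"model": tag, "prompt": "Hello", "stream": False},
        timeout=300,
    )
    r.raise_for_status()
    print(tag, "->", r.json()["response"][:80])
```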
<details>
<summary>Old instructions for building from my fork</summary>
**build ollama**
```sh
# Add my personal fork as a remote in the submodule
cd llm/llama.cpp
git remote add gabe https://github.com/gabe-l-hart/llama.cpp.git
git fetch gabe
cd -
# Generate and build like normal
go generate ./...
go build .
```
</details> | {
"login": "jessegross",
"id": 6468499,
"node_id": "MDQ6VXNlcjY0Njg0OTk=",
"avatar_url": "https://avatars.githubusercontent.com/u/6468499?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jessegross",
"html_url": "https://github.com/jessegross",
"followers_url": "https://api.github.com/users/jessegross/followers",
"following_url": "https://api.github.com/users/jessegross/following{/other_user}",
"gists_url": "https://api.github.com/users/jessegross/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jessegross/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jessegross/subscriptions",
"organizations_url": "https://api.github.com/users/jessegross/orgs",
"repos_url": "https://api.github.com/users/jessegross/repos",
"events_url": "https://api.github.com/users/jessegross/events{/privacy}",
"received_events_url": "https://api.github.com/users/jessegross/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/6760/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/6760/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/7135 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/7135/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/7135/comments | https://api.github.com/repos/ollama/ollama/issues/7135/events | https://github.com/ollama/ollama/issues/7135 | 2,573,472,075 | I_kwDOJ0Z1Ps6ZZBFL | 7,135 | use the macOS electron app for Windows and Linux | {
"login": "hichemfantar",
"id": 34947993,
"node_id": "MDQ6VXNlcjM0OTQ3OTkz",
"avatar_url": "https://avatars.githubusercontent.com/u/34947993?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/hichemfantar",
"html_url": "https://github.com/hichemfantar",
"followers_url": "https://api.github.com/users/hichemfantar/followers",
"following_url": "https://api.github.com/users/hichemfantar/following{/other_user}",
"gists_url": "https://api.github.com/users/hichemfantar/gists{/gist_id}",
"starred_url": "https://api.github.com/users/hichemfantar/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/hichemfantar/subscriptions",
"organizations_url": "https://api.github.com/users/hichemfantar/orgs",
"repos_url": "https://api.github.com/users/hichemfantar/repos",
"events_url": "https://api.github.com/users/hichemfantar/events{/privacy}",
"received_events_url": "https://api.github.com/users/hichemfantar/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396200,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request",
"name": "feature request",
"color": "a2eeef",
"default": false,
"description": "New feature or request"
}
] | open | false | null | [] | null | 0 | 2024-10-08T14:59:07 | 2024-10-08T15:03:18 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | I don't understand why the Electron app is macOS-only when Electron is perfectly capable of running on Windows and Linux.
Features like #7097 could easily be adapted for all platforms if Electron were used. | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/7135/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/7135/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/3944 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/3944/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/3944/comments | https://api.github.com/repos/ollama/ollama/issues/3944/events | https://github.com/ollama/ollama/issues/3944 | 2,265,803,830 | I_kwDOJ0Z1Ps6HDWw2 | 3,944 | /api/embeddings hangs when prompt is only whitespace | {
"login": "alexmavr",
"id": 680441,
"node_id": "MDQ6VXNlcjY4MDQ0MQ==",
"avatar_url": "https://avatars.githubusercontent.com/u/680441?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/alexmavr",
"html_url": "https://github.com/alexmavr",
"followers_url": "https://api.github.com/users/alexmavr/followers",
"following_url": "https://api.github.com/users/alexmavr/following{/other_user}",
"gists_url": "https://api.github.com/users/alexmavr/gists{/gist_id}",
"starred_url": "https://api.github.com/users/alexmavr/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/alexmavr/subscriptions",
"organizations_url": "https://api.github.com/users/alexmavr/orgs",
"repos_url": "https://api.github.com/users/alexmavr/repos",
"events_url": "https://api.github.com/users/alexmavr/events{/privacy}",
"received_events_url": "https://api.github.com/users/alexmavr/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 5 | 2024-04-26T13:34:09 | 2024-06-29T22:53:16 | 2024-06-29T22:53:16 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
The following invocation hangs indefinitely:
```
$ curl http://localhost:11434/api/embeddings -d '{
"model": "all-minilm",
"prompt": " "
}'
```
Same behavior for model "mxbai-embed-large"
Relevant debug logs:
```
{"function":"process_single_task","level":"INFO","line":1510,"msg":"slot data","n_idle_slots":1,"n_processing_slots":0,"task_id":6,"tid":"0x1ea27fac0","timestamp":1714138666}
{"function":"log_server_request","level":"INFO","line":2741,"method":"GET","msg":"request","params":{},"path":"/health","remote_addr":"127.0.0.1","remote_port":53684,"status":200,"tid":"0x16be2b000","timestamp":1714138666}
{"function":"process_single_task","level":"INFO","line":1510,"msg":"slot data","n_idle_slots":1,"n_processing_slots":0,"task_id":7,"tid":"0x1ea27fac0","timestamp":1714138666}
{"function":"log_server_request","level":"INFO","line":2741,"method":"GET","msg":"request","params":{},"path":"/health","remote_addr":"127.0.0.1","remote_port":53689,"status":200,"tid":"0x16beb7000","timestamp":1714138666}
{"function":"launch_slot_with_data","level":"INFO","line":833,"msg":"slot is processing task","slot_id":0,"task_id":8,"tid":"0x1ea27fac0","timestamp":1714138666}
{"function":"update_slots","level":"INFO","line":1840,"msg":"kv cache rm [p0, end)","p0":0,"slot_id":0,"task_id":8,"tid":"0x1ea27fac0","timestamp":1714138666}
```
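Until this is fixed server-side, a client-side guard avoids the hang; a sketch against the same endpoint, with an explicit timeout (model name taken from the report above):

```python
import requests

def embed(prompt: str, model: str = "all-minilm", timeout: float = 30.0):
    # Reject whitespace-only prompts up front, since the server
    # currently never answers them.
    if not prompt.strip():
        raise ValueError("prompt is empty or whitespace-only")
    resp = requests.post(
        "http://localhost:11434/api/embeddings",
        json={"model": model, "prompt": prompt},
        timeout=timeout,  # fail fast instead of hanging indefinitely
    )
    resp.raise_for_status()
    return resp.json()["embedding"]
```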
### OS
macOS
### GPU
Apple
### CPU
Apple
### Ollama version
0.1.32 | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/3944/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/3944/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/8686 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/8686/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/8686/comments | https://api.github.com/repos/ollama/ollama/issues/8686/events | https://github.com/ollama/ollama/issues/8686 | 2,820,001,072 | I_kwDOJ0Z1Ps6oFc0w | 8,686 | Support Deepseek Janus Pro Series (7B & 1B) | {
"login": "zytoh0",
"id": 90326544,
"node_id": "MDQ6VXNlcjkwMzI2NTQ0",
"avatar_url": "https://avatars.githubusercontent.com/u/90326544?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/zytoh0",
"html_url": "https://github.com/zytoh0",
"followers_url": "https://api.github.com/users/zytoh0/followers",
"following_url": "https://api.github.com/users/zytoh0/following{/other_user}",
"gists_url": "https://api.github.com/users/zytoh0/gists{/gist_id}",
"starred_url": "https://api.github.com/users/zytoh0/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/zytoh0/subscriptions",
"organizations_url": "https://api.github.com/users/zytoh0/orgs",
"repos_url": "https://api.github.com/users/zytoh0/repos",
"events_url": "https://api.github.com/users/zytoh0/events{/privacy}",
"received_events_url": "https://api.github.com/users/zytoh0/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5789807732,
"node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA",
"url": "https://api.github.com/repos/ollama/ollama/labels/model%20request",
"name": "model request",
"color": "1E5DE6",
"default": false,
"description": "Model requests"
}
] | open | false | null | [] | null | 2 | 2025-01-30T06:17:54 | 2025-01-30T08:28:58 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | Hello, good day to you all. I would like to request that ollama add support for Deepseek Janus Pro Series (currently only 7B & 1B):
1. https://huggingface.co/deepseek-ai/Janus-Pro-1B
2. https://huggingface.co/deepseek-ai/Janus-Pro-7B | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/8686/reactions",
"total_count": 6,
"+1": 6,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/8686/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/6086 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/6086/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/6086/comments | https://api.github.com/repos/ollama/ollama/issues/6086/events | https://github.com/ollama/ollama/issues/6086 | 2,438,997,377 | I_kwDOJ0Z1Ps6RYCWB | 6,086 | yi:9b Abnormal content output `<|im_end()>` | {
"login": "wszgrcy",
"id": 9607121,
"node_id": "MDQ6VXNlcjk2MDcxMjE=",
"avatar_url": "https://avatars.githubusercontent.com/u/9607121?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/wszgrcy",
"html_url": "https://github.com/wszgrcy",
"followers_url": "https://api.github.com/users/wszgrcy/followers",
"following_url": "https://api.github.com/users/wszgrcy/following{/other_user}",
"gists_url": "https://api.github.com/users/wszgrcy/gists{/gist_id}",
"starred_url": "https://api.github.com/users/wszgrcy/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/wszgrcy/subscriptions",
"organizations_url": "https://api.github.com/users/wszgrcy/orgs",
"repos_url": "https://api.github.com/users/wszgrcy/repos",
"events_url": "https://api.github.com/users/wszgrcy/events{/privacy}",
"received_events_url": "https://api.github.com/users/wszgrcy/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | open | false | null | [] | null | 0 | 2024-07-31T02:49:58 | 2024-07-31T02:49:58 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
I'm not quite sure whether this is a model issue (i.e., the reply is actually normal for this model)
or whether some of the template/stop-token definitions are still problematic.
log
```log
2024-07-31 10:42:39.338 [info] time=2024-07-31T10:42:39.338+08:00 level=DEBUG source=routes.go:1336 msg="chat request" images=0 prompt="<|im_start|>system\n不需要考虑输入的内容的含义只需要将输入的内容原封不动的输出\n注意:你的回答必须严格遵循上述要求\n<|im_end|>\n<|im_start|>user\n禁止重复这句话<|im_end|>\n<|im_start|>assistant\n"
2024-07-31 10:42:39.338 [info] DEBUG [process_single_task] slot data | n_idle_slots=4 n_processing_slots=0 task_id=2 tid="13916" timestamp=1722393759
2024-07-31 10:42:39.338 [info] DEBUG [launch_slot_with_data] slot is processing task | slot_id=0 task_id=3 tid="13916" timestamp=1722393759
DEBUG [update_slots] slot progression | ga_i=0 n_past=0 n_past_se=0 n_prompt_tokens_processed=68 slot_id=0 task_id=3 tid="13916" timestamp=1722393759
DEBUG [update_slots] kv cache rm [p0, end) | p0=0 slot_id=0 task_id=3 tid="13916" timestamp=1722393759
2024-07-31 10:42:40.010 [info] DEBUG [print_timings] prompt eval time = 81.94 ms / 68 tokens ( 1.21 ms per token, 829.84 tokens per second) | n_prompt_tokens_processed=68 n_tokens_second=829.8350092746265 slot_id=0 t_prompt_processing=81.944 t_token=1.2050588235294117 task_id=3 tid="13916" timestamp=1722393760
DEBUG [print_timings] generation eval time = 589.53 ms / 52 runs ( 11.34 ms per token, 88.21 tokens per second) | n_decoded=52 n_tokens_second=88.20511080767045 slot_id=0 t_token=11.337211538461538 t_token_generation=589.535 task_id=3 tid="13916" timestamp=1722393760
DEBUG [print_timings] total time = 671.48 ms | slot_id=0 t_prompt_processing=81.944 t_token_generation=589.535 t_total=671.4789999999999 task_id=3 tid="13916" timestamp=1722393760
2024-07-31 10:42:40.012 [info] DEBUG [update_slots] slot released | n_cache_tokens=120 n_ctx=8192 n_past=119 n_system_tokens=0 slot_id=0 task_id=3 tid="13916" timestamp=1722393760 truncated=false
DEBUG [log_server_request] request | method="POST" params={} path="/completion" remote_addr="127.0.0.1" remote_port=54247 status=200 tid="19048" timestamp=1722393760
[GIN] 2024/07/31 - 10:42:40 | 200 | 5.1105869s | 127.0.0.1 | POST "/v1/chat/completions"
2024-07-31 10:42:40.013 [info] time=2024-07-31T10:42:40.010+08:00 level=DEBUG source=sched.go:454 msg="context for request finished"
time=2024-07-31T10:42:40.010+08:00 level=DEBUG source=sched.go:326 msg="runner with non-zero duration has gone idle, adding timer" modelPath=C:\Users\chen\.ollama\models\blobs\sha256-59b1e2a78cb0a5b8f7ba25e459ee17788242cd996e1f62a295dcbd65e5344b11 duration=5m0s
time=2024-07-31T10:42:40.010+08:00 level=DEBUG source=sched.go:344 msg="after processing request finished event" modelPath=C:\Users\chen\.ollama\models\blobs\sha256-59b1e2a78cb0a5b8f7ba25e459ee17788242cd996e1f62a295dcbd65e5344b11 refCount=0
```
result
```
禁止重复这句话<|im_end()>
请注意,根据您的指令,我将原封不动地重复您提供的文本:“禁止重复这句话”。如果您有其他问题或需要帮助,请告诉我。
```
log2
```
2024-07-31 10:49:18.444 [info] time=2024-07-31T10:49:18.444+08:00 level=DEBUG source=routes.go:1336 msg="chat request" images=0 prompt="<|im_start|>system\n不需要考虑输入的内容的含义只需要将输入的内容原封不动的输出\n注意:你的回答必须严格遵循上述要求\n<|im_end|>\n<|im_start|>user\n1+1等于几?<|im_end|>\n<|im_start|>assistant\n"
2024-07-31 10:49:18.444 [info] DEBUG [process_single_task] slot data | n_idle_slots=4 n_processing_slots=0 task_id=428 tid="13916" timestamp=1722394158
2024-07-31 10:49:18.445 [info] DEBUG [prefix_slot] slot with common prefix found | 0=["slot_id",0,"characters",233]
DEBUG [launch_slot_with_data] slot is processing task | slot_id=0 task_id=429 tid="13916" timestamp=1722394158
2024-07-31 10:49:18.445 [info] DEBUG [update_slots] slot progression | ga_i=0 n_past=71 n_past_se=0 n_prompt_tokens_processed=71 slot_id=0 task_id=429 tid="13916" timestamp=1722394158
DEBUG [update_slots] we have to evaluate at least 1 token to generate logits | slot_id=0 task_id=429 tid="13916" timestamp=1722394158
DEBUG [update_slots] kv cache rm [p0, end) | p0=70 slot_id=0 task_id=429 tid="13916" timestamp=1722394158
2024-07-31 10:49:18.616 [info] DEBUG [print_timings] prompt eval time = 19.07 ms / 71 tokens ( 0.27 ms per token, 3722.34 tokens per second) | n_prompt_tokens_processed=71 n_tokens_second=3722.3445527943795 slot_id=0 t_prompt_processing=19.074 t_token=0.2686478873239437 task_id=429 tid="13916" timestamp=1722394158
DEBUG [print_timings] generation eval time = 151.99 ms / 14 runs ( 10.86 ms per token, 92.11 tokens per second) | n_decoded=14 n_tokens_second=92.11011105847676 slot_id=0 t_token=10.856571428571428 t_token_generation=151.992 task_id=429 tid="13916" timestamp=1722394158
DEBUG [print_timings] total time = 171.07 ms | slot_id=0 t_prompt_processing=19.074 t_token_generation=151.992 t_total=171.066 task_id=429 tid="13916" timestamp=1722394158
2024-07-31 10:49:18.617 [info] time=2024-07-31T10:49:18.616+08:00 level=DEBUG source=sched.go:395 msg="context for request finished"
time=2024-07-31T10:49:18.616+08:00 level=DEBUG source=sched.go:326 msg="runner with non-zero duration has gone idle, adding timer" modelPath=C:\Users\chen\.ollama\models\blobs\sha256-59b1e2a78cb0a5b8f7ba25e459ee17788242cd996e1f62a295dcbd65e5344b11 duration=5m0s
time=2024-07-31T10:49:18.616+08:00 level=DEBUG source=sched.go:344 msg="after processing request finished event" modelPath=C:\Users\chen\.ollama\models\blobs\sha256-59b1e2a78cb0a5b8f7ba25e459ee17788242cd996e1f62a295dcbd65e5344b11 refCount=0
2024-07-31 10:49:18.617 [info] DEBUG [update_slots] slot released | n_cache_tokens=85 n_ctx=8192 n_past=84 n_system_tokens=0 slot_id=0 task_id=429 tid="13916" timestamp=1722394158 truncated=false
DEBUG [log_server_request] request | method="POST" params={} path="/completion" remote_addr="127.0.0.1" remote_port=54416 status=200 tid="17736" timestamp=1722394158
[GIN] 2024/07/31 - 10:49:18 | 200 | 180.1991ms | 127.0.0.1 | POST "/v1/chat/completions"
```
result2
```
1+1等于2。<|im_end()>
```
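If the stray marker is a stop-token mismatch rather than intended model output, passing explicit stop sequences on the native endpoint should suppress it; a sketch (the extra stop string is copied verbatim from the output above):

```python
import requests

resp = requests.post(
    "http://localhost:11434/api/chat",
    json={
        "model": "yi:9b",
        "messages": [{"role": "user", "content": "1+1等于几?"}],
        "stream": False,
        # Treat both the proper and the malformed end-of-turn markers as stops.
        "options": {"stop": ["<|im_end|>", "<|im_end()>"]},
    },
    timeout=120,
)
print(resp.json()["message"]["content"])
```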
### OS
Windows
### GPU
AMD
### CPU
AMD
### Ollama version
0.3.0 | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/6086/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/6086/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/6539 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/6539/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/6539/comments | https://api.github.com/repos/ollama/ollama/issues/6539/events | https://github.com/ollama/ollama/pull/6539 | 2,490,646,168 | PR_kwDOJ0Z1Ps55o4AN | 6,539 | fix: validate modelpath | {
"login": "mxyng",
"id": 2372640,
"node_id": "MDQ6VXNlcjIzNzI2NDA=",
"avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mxyng",
"html_url": "https://github.com/mxyng",
"followers_url": "https://api.github.com/users/mxyng/followers",
"following_url": "https://api.github.com/users/mxyng/following{/other_user}",
"gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mxyng/subscriptions",
"organizations_url": "https://api.github.com/users/mxyng/orgs",
"repos_url": "https://api.github.com/users/mxyng/repos",
"events_url": "https://api.github.com/users/mxyng/events{/privacy}",
"received_events_url": "https://api.github.com/users/mxyng/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2024-08-28T00:58:32 | 2024-08-28T21:38:28 | 2024-08-28T21:38:27 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/6539",
"html_url": "https://github.com/ollama/ollama/pull/6539",
"diff_url": "https://github.com/ollama/ollama/pull/6539.diff",
"patch_url": "https://github.com/ollama/ollama/pull/6539.patch",
"merged_at": "2024-08-28T21:38:27"
} | ensure model path resolves to a local path | {
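The underlying check is the classic contained-path test; a language-agnostic sketch of the idea (not the PR's actual Go code):

```python
import os

def resolves_inside(base: str, candidate: str) -> bool:
    # Normalize both paths and require the candidate to stay under base,
    # which defeats ../ traversal and absolute-path escapes.
    base = os.path.realpath(base)
    target = os.path.realpath(os.path.join(base, candidate))
    return os.path.commonpath([base, target]) == base
```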
"login": "mxyng",
"id": 2372640,
"node_id": "MDQ6VXNlcjIzNzI2NDA=",
"avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mxyng",
"html_url": "https://github.com/mxyng",
"followers_url": "https://api.github.com/users/mxyng/followers",
"following_url": "https://api.github.com/users/mxyng/following{/other_user}",
"gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mxyng/subscriptions",
"organizations_url": "https://api.github.com/users/mxyng/orgs",
"repos_url": "https://api.github.com/users/mxyng/repos",
"events_url": "https://api.github.com/users/mxyng/events{/privacy}",
"received_events_url": "https://api.github.com/users/mxyng/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/6539/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/6539/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/535 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/535/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/535/comments | https://api.github.com/repos/ollama/ollama/issues/535/events | https://github.com/ollama/ollama/pull/535 | 1,899,070,851 | PR_kwDOJ0Z1Ps5aeaCZ | 535 | only add a layer if there is actual data | {
"login": "pdevine",
"id": 75239,
"node_id": "MDQ6VXNlcjc1MjM5",
"avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/pdevine",
"html_url": "https://github.com/pdevine",
"followers_url": "https://api.github.com/users/pdevine/followers",
"following_url": "https://api.github.com/users/pdevine/following{/other_user}",
"gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}",
"starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/pdevine/subscriptions",
"organizations_url": "https://api.github.com/users/pdevine/orgs",
"repos_url": "https://api.github.com/users/pdevine/repos",
"events_url": "https://api.github.com/users/pdevine/events{/privacy}",
"received_events_url": "https://api.github.com/users/pdevine/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2023-09-15T20:59:14 | 2023-09-18T20:47:46 | 2023-09-18T20:47:46 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/535",
"html_url": "https://github.com/ollama/ollama/pull/535",
"diff_url": "https://github.com/ollama/ollama/pull/535.diff",
"patch_url": "https://github.com/ollama/ollama/pull/535.patch",
"merged_at": "2023-09-18T20:47:46"
} | This is a simple change that checks the layer size before adding the layer to the overall model. The registry balks if you try to send it an empty layer on an `ollama push`.
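For illustration, the guard amounts to a size check before appending; a hypothetical sketch of the idea (names are illustrative, not the actual Go code):

```python
def add_layer(layers: list, layer) -> None:
    # Skip zero-length layers; the registry rejects empty blobs on push.
    if getattr(layer, "size", 0) > 0:
        layers.append(layer)
```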
| {
"login": "pdevine",
"id": 75239,
"node_id": "MDQ6VXNlcjc1MjM5",
"avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/pdevine",
"html_url": "https://github.com/pdevine",
"followers_url": "https://api.github.com/users/pdevine/followers",
"following_url": "https://api.github.com/users/pdevine/following{/other_user}",
"gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}",
"starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/pdevine/subscriptions",
"organizations_url": "https://api.github.com/users/pdevine/orgs",
"repos_url": "https://api.github.com/users/pdevine/repos",
"events_url": "https://api.github.com/users/pdevine/events{/privacy}",
"received_events_url": "https://api.github.com/users/pdevine/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/535/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/535/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/3185 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/3185/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/3185/comments | https://api.github.com/repos/ollama/ollama/issues/3185/events | https://github.com/ollama/ollama/issues/3185 | 2,190,192,859 | I_kwDOJ0Z1Ps6Ci7Db | 3,185 | ollama doesn't distribute notice licenses in its release artifacts | {
"login": "jart",
"id": 49262,
"node_id": "MDQ6VXNlcjQ5MjYy",
"avatar_url": "https://avatars.githubusercontent.com/u/49262?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jart",
"html_url": "https://github.com/jart",
"followers_url": "https://api.github.com/users/jart/followers",
"following_url": "https://api.github.com/users/jart/following{/other_user}",
"gists_url": "https://api.github.com/users/jart/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jart/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jart/subscriptions",
"organizations_url": "https://api.github.com/users/jart/orgs",
"repos_url": "https://api.github.com/users/jart/repos",
"events_url": "https://api.github.com/users/jart/events{/privacy}",
"received_events_url": "https://api.github.com/users/jart/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | open | false | {
"login": "BruceMacD",
"id": 5853428,
"node_id": "MDQ6VXNlcjU4NTM0Mjg=",
"avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/BruceMacD",
"html_url": "https://github.com/BruceMacD",
"followers_url": "https://api.github.com/users/BruceMacD/followers",
"following_url": "https://api.github.com/users/BruceMacD/following{/other_user}",
"gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}",
"starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions",
"organizations_url": "https://api.github.com/users/BruceMacD/orgs",
"repos_url": "https://api.github.com/users/BruceMacD/repos",
"events_url": "https://api.github.com/users/BruceMacD/events{/privacy}",
"received_events_url": "https://api.github.com/users/BruceMacD/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "BruceMacD",
"id": 5853428,
"node_id": "MDQ6VXNlcjU4NTM0Mjg=",
"avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/BruceMacD",
"html_url": "https://github.com/BruceMacD",
"followers_url": "https://api.github.com/users/BruceMacD/followers",
"following_url": "https://api.github.com/users/BruceMacD/following{/other_user}",
"gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}",
"starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions",
"organizations_url": "https://api.github.com/users/BruceMacD/orgs",
"repos_url": "https://api.github.com/users/BruceMacD/repos",
"events_url": "https://api.github.com/users/BruceMacD/events{/privacy}",
"received_events_url": "https://api.github.com/users/BruceMacD/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 0 | 2024-03-16T19:13:26 | 2024-03-21T08:42:52 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
ollama uses projects like llama.cpp as a statically linked dependency. The terms of the MIT license require that it distribute the copyright notice in both source and binary form. Yet if I `grep` for "Georgi Gerganov" on my Linux and Windows installation folders for ollama, the copyright notices are nowhere to be found. This is a violation of the terms of the license which should be rectified.
### What did you expect to see?
I expected the copyright notices of dependent projects to be at minimum present within the binary artifacts. Some people online are of the opinion that a mention of llama.cpp should be present in places like the README as well, although the license doesn't strictly require that.
### Steps to reproduce
_No response_
### Are there any recent changes that introduced the issue?
```
grep -iR 'Georgi Gerganov' AppData/Local/Programs/Ollama/
grep -R 'Georgi Gerganov' /usr/local/bin/ollama
etc.
```
### OS
Other
### Architecture
Other
### Platform
_No response_
### Ollama version
_No response_
### GPU
_No response_
### GPU info
_No response_
### CPU
_No response_
### Other software
_No response_ | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/3185/reactions",
"total_count": 27,
"+1": 27,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/3185/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/6727 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/6727/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/6727/comments | https://api.github.com/repos/ollama/ollama/issues/6727/events | https://github.com/ollama/ollama/issues/6727 | 2,516,529,897 | I_kwDOJ0Z1Ps6V_zLp | 6,727 | Does ollama check for free disk space BEFORE pulling a new model? | {
"login": "bulrush15",
"id": 7031486,
"node_id": "MDQ6VXNlcjcwMzE0ODY=",
"avatar_url": "https://avatars.githubusercontent.com/u/7031486?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/bulrush15",
"html_url": "https://github.com/bulrush15",
"followers_url": "https://api.github.com/users/bulrush15/followers",
"following_url": "https://api.github.com/users/bulrush15/following{/other_user}",
"gists_url": "https://api.github.com/users/bulrush15/gists{/gist_id}",
"starred_url": "https://api.github.com/users/bulrush15/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/bulrush15/subscriptions",
"organizations_url": "https://api.github.com/users/bulrush15/orgs",
"repos_url": "https://api.github.com/users/bulrush15/repos",
"events_url": "https://api.github.com/users/bulrush15/events{/privacy}",
"received_events_url": "https://api.github.com/users/bulrush15/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396200,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request",
"name": "feature request",
"color": "a2eeef",
"default": false,
"description": "New feature or request"
}
] | closed | false | null | [] | null | 1 | 2024-09-10T13:45:19 | 2024-09-12T00:39:42 | 2024-09-12T00:39:42 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | I actually have 3 drives in my Windows 11 system.
Does ollama check for free disk space on the drive it's installed on BEFORE pulling a new model? Before it pulls a model, it should check that the user will still have at least 2-3 GB of free disk space after the pull. If the user doesn't have that, then ollama should show an error message and links on how to clean up browser caches, temp files, etc.
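For reference, such a pre-pull check is cheap to do; a sketch (the model directory and the 3 GB headroom are illustrative values):

```python
import shutil

def has_room(model_dir: str, download_bytes: int, headroom: int = 3 * 1024**3) -> bool:
    # Compare free space on the drive holding the model store against
    # the expected download size plus a safety margin.
    free = shutil.disk_usage(model_dir).free
    return free >= download_bytes + headroom
```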
| {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/6727/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/6727/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/7833 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/7833/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/7833/comments | https://api.github.com/repos/ollama/ollama/issues/7833/events | https://github.com/ollama/ollama/pull/7833 | 2,692,437,708 | PR_kwDOJ0Z1Ps6DGp7e | 7,833 | server: fix proxy not being set from environment | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2024-11-25T22:29:45 | 2024-11-26T00:10:26 | 2024-11-26T00:10:26 | MEMBER | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/7833",
"html_url": "https://github.com/ollama/ollama/pull/7833",
"diff_url": "https://github.com/ollama/ollama/pull/7833.diff",
"patch_url": "https://github.com/ollama/ollama/pull/7833.patch",
"merged_at": null
} | Fixes https://github.com/ollama/ollama/issues/7829
Fixes https://github.com/ollama/ollama/issues/7788 | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/7833/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/7833/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/2173 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/2173/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/2173/comments | https://api.github.com/repos/ollama/ollama/issues/2173/events | https://github.com/ollama/ollama/issues/2173 | 2,098,751,675 | I_kwDOJ0Z1Ps59GGi7 | 2,173 | Issues with OllamaEmbedding | {
"login": "RonHein",
"id": 27790393,
"node_id": "MDQ6VXNlcjI3NzkwMzkz",
"avatar_url": "https://avatars.githubusercontent.com/u/27790393?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/RonHein",
"html_url": "https://github.com/RonHein",
"followers_url": "https://api.github.com/users/RonHein/followers",
"following_url": "https://api.github.com/users/RonHein/following{/other_user}",
"gists_url": "https://api.github.com/users/RonHein/gists{/gist_id}",
"starred_url": "https://api.github.com/users/RonHein/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/RonHein/subscriptions",
"organizations_url": "https://api.github.com/users/RonHein/orgs",
"repos_url": "https://api.github.com/users/RonHein/repos",
"events_url": "https://api.github.com/users/RonHein/events{/privacy}",
"received_events_url": "https://api.github.com/users/RonHein/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5895046125,
"node_id": "LA_kwDOJ0Z1Ps8AAAABX19D7Q",
"url": "https://api.github.com/repos/ollama/ollama/labels/integration",
"name": "integration",
"color": "92E43A",
"default": false,
"description": ""
}
] | closed | false | null | [] | null | 3 | 2024-01-24T17:27:51 | 2024-05-10T23:32:34 | 2024-05-10T23:32:34 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | Hi, I am having trouble using OllamaEmbedding. I am unable to retrieve the correct vectors and the similarity score is really high. I was able to get the correct vectors with OpenAIEmbedding but I am hoping to get OllamaEmbedding working. Is there something that I am missing?
Below is a simple loader with chromadb using OllamaEmbedding.
```python
from langchain.document_loaders import PyPDFLoader, UnstructuredExcelLoader, Docx2TxtLoader, BSHTMLLoader, TextLoader
from langchain.embeddings import OllamaEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter

def chunk():
    loader = TextLoader('./samples/facts.txt')
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=128,
        chunk_overlap=20,
    )
    docs = loader.load_and_split(
        text_splitter=text_splitter
    )
    return docs

def create_embedding():
    docs = chunk()
    embeddings = OllamaEmbeddings()
    db = Chroma.from_documents(
        docs,
        embedding=embeddings,
        persist_directory="./samples/docs/chroma",
    )
    results = db.similarity_search_with_score("What is an interesting fact about the English language?")
    print("~~~~similarity_search_with_score~~~~")
    for result in results:
        print("\n")
        print(result[1])
        print(result[0].page_content)
```
This is the output:
8292.622553378074
16. Queen Elizabeth II is the longest-reigning current monarch.
17. The Leaning Tower of Pisa took 200 years to construct.
8386.487814338176
6. The elephant is the only mammal that can't jump.
7. The letter 'Q' is the only letter not appearing in any U.S. state name.
8529.430614665867
34. The shortest war in history was between Britain and Zanzibar on August 27, 1896. Zanzibar surrendered after 38 minutes.
8711.880867153133
50. Canada has more lakes than the rest of the world combined.
51. 10% of the world's population is left-handed.
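Two things worth checking when comparing against OpenAIEmbedding: which embedding model the server actually uses, and the fact that `similarity_search_with_score` reports distances (lower is better), so unnormalized vectors from a chat model can yield very large numbers. A sketch that pins the model and inspects the raw vector (the model name is illustrative; use whatever the local server has pulled):

```python
from langchain.embeddings import OllamaEmbeddings

# Pin the model explicitly instead of relying on the class default.
embeddings = OllamaEmbeddings(base_url="http://localhost:11434", model="llama2")

vec = embeddings.embed_query("What is an interesting fact about the English language?")
print(len(vec), sum(v * v for v in vec) ** 0.5)  # dimensionality and L2 norm
```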
| {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/2173/reactions",
"total_count": 4,
"+1": 4,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/2173/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/3151 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/3151/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/3151/comments | https://api.github.com/repos/ollama/ollama/issues/3151/events | https://github.com/ollama/ollama/issues/3151 | 2,187,206,177 | I_kwDOJ0Z1Ps6CXh4h | 3,151 | Doubt about openai compatibility with temperature parameter | {
"login": "ejgutierrez74",
"id": 11474846,
"node_id": "MDQ6VXNlcjExNDc0ODQ2",
"avatar_url": "https://avatars.githubusercontent.com/u/11474846?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ejgutierrez74",
"html_url": "https://github.com/ejgutierrez74",
"followers_url": "https://api.github.com/users/ejgutierrez74/followers",
"following_url": "https://api.github.com/users/ejgutierrez74/following{/other_user}",
"gists_url": "https://api.github.com/users/ejgutierrez74/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ejgutierrez74/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ejgutierrez74/subscriptions",
"organizations_url": "https://api.github.com/users/ejgutierrez74/orgs",
"repos_url": "https://api.github.com/users/ejgutierrez74/repos",
"events_url": "https://api.github.com/users/ejgutierrez74/events{/privacy}",
"received_events_url": "https://api.github.com/users/ejgutierrez74/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 3 | 2024-03-14T20:22:49 | 2024-03-15T18:33:55 | 2024-03-15T01:41:57 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | I have a question about the use of the temperature parameter.
As I understand it, temperature in llama2 ranges from 0.0 to 1.0.
But if you use chat.completions through the OpenAI compatibility layer (https://github.com/ollama/ollama/blob/main/docs/openai.md), the OpenAI documentation says temperature values range from 0.0 to 2.0, so there seems to be a mismatch.
For example, a temperature of 1.0 would be maximally random for llama2, but only medium randomness for OpenAI (where the max is 2.0).
So when I use:
```
from openai import OpenAI

# Client setup as in Ollama's OpenAI compatibility docs.
client = OpenAI(
    base_url='http://localhost:11434/v1/',
    api_key='ollama',  # required by the client but unused by Ollama
)

chat_completion = client.chat.completions.create(
    messages=[
        {
            'role': 'user',
            'content': 'Say this is a test',
        }
    ],
    model='llama2',
    temperature=1.0
)
```
What should I expect: medium randomness per the OpenAI documentation, or full randomness per llama2?
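In case it helps to explain what I mean, this is the client-side rescaling I would apply myself if the compatibility layer does no normalization — a sketch assuming a simple linear mapping between the two documented ranges:
```
def openai_to_llama_temperature(t: float) -> float:
    # Assumed linear mapping: clamp to OpenAI's documented 0.0-2.0 range,
    # then rescale into llama2's 0.0-1.0 range. I don't know whether
    # Ollama's OpenAI compatibility layer already does something like this.
    return min(max(t, 0.0), 2.0) / 2.0

print(openai_to_llama_temperature(1.0))  # 0.5, i.e. medium randomness
```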
Thanks in advance | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/3151/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/3151/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/5682 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5682/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5682/comments | https://api.github.com/repos/ollama/ollama/issues/5682/events | https://github.com/ollama/ollama/issues/5682 | 2,407,159,821 | I_kwDOJ0Z1Ps6PelgN | 5,682 | Add model metadata which indicated model purpose to /api/tags endpoint. | {
"login": "CannonFodderr",
"id": 36086310,
"node_id": "MDQ6VXNlcjM2MDg2MzEw",
"avatar_url": "https://avatars.githubusercontent.com/u/36086310?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/CannonFodderr",
"html_url": "https://github.com/CannonFodderr",
"followers_url": "https://api.github.com/users/CannonFodderr/followers",
"following_url": "https://api.github.com/users/CannonFodderr/following{/other_user}",
"gists_url": "https://api.github.com/users/CannonFodderr/gists{/gist_id}",
"starred_url": "https://api.github.com/users/CannonFodderr/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/CannonFodderr/subscriptions",
"organizations_url": "https://api.github.com/users/CannonFodderr/orgs",
"repos_url": "https://api.github.com/users/CannonFodderr/repos",
"events_url": "https://api.github.com/users/CannonFodderr/events{/privacy}",
"received_events_url": "https://api.github.com/users/CannonFodderr/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396200,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request",
"name": "feature request",
"color": "a2eeef",
"default": false,
"description": "New feature or request"
},
{
"id": 7706482389,
"node_id": "LA_kwDOJ0Z1Ps8AAAABy1eW1Q",
"url": "https://api.github.com/repos/ollama/ollama/labels/api",
"name": "api",
"color": "bfdadc",
"default": false,
"description": ""
}
] | open | false | null | [] | null | 1 | 2024-07-13T20:49:33 | 2024-11-06T01:07:04 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | It would be nice if the **/api/tags** response details included metadata such as:
`type: embedding | general | code | math | vision | audio` etc...
`languages: [en-US, ...]`
This could help with model sorting and selection.
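As an illustration, a client could then filter models like this (a sketch only — the `type` key is the field proposed here and does not exist in the current response):
```
import requests

# Sketch: filter locally available models by the *proposed* "type" field.
# Today's /api/tags response has no such key, so against a real server
# this returns an empty list; it only shows the intended usage.
models = requests.get("http://localhost:11434/api/tags").json().get("models", [])
embedding_models = [m["name"] for m in models if m.get("type") == "embedding"]
print(embedding_models)
```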
If the metadata is not available, perhaps add an option for users to tag models with it manually. | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5682/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/5682/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/5049 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5049/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5049/comments | https://api.github.com/repos/ollama/ollama/issues/5049/events | https://github.com/ollama/ollama/pull/5049 | 2,354,096,366 | PR_kwDOJ0Z1Ps5yhXVZ | 5,049 | Cuda v12 | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 9 | 2024-06-14T20:56:22 | 2024-08-20T18:06:58 | 2024-08-19T18:14:24 | COLLABORATOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/5049",
"html_url": "https://github.com/ollama/ollama/pull/5049",
"diff_url": "https://github.com/ollama/ollama/pull/5049.diff",
"patch_url": "https://github.com/ollama/ollama/pull/5049.patch",
"merged_at": "2024-08-19T18:14:24"
} | This builds upon the new Linux packaging model in #5631 to support building two different CUDA runners: v11 for support going back to CC 5.0, and v12 for CC 6.0 and newer GPUs. This allows us to start enabling new features such as `GGML_CUDA_USE_GRAPHS`, which require CUDA v12 support, without dropping support for older GPUs.
Fixes #4958
Fixes #5737
Fixes #2361
Fixes #6144
Resulting sizes:
```
% ls -lh dist/*.xz
-rw-r--r-- 1 daniel staff 1.4G Aug 12 11:43 dist/ollama-linux-amd64.tar.xz
-rw-r--r-- 1 daniel staff 1.5G Aug 12 12:11 dist/ollama-linux-arm64.tar.xz
```
```
time=2024-07-12T20:24:36.369Z level=INFO source=payload.go:44 msg="Dynamic LLM libraries [cpu_avx cpu_avx2 cuda_v11 cuda_v12 rocm_v60101 cpu]"
```
| {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5049/reactions",
"total_count": 3,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 3,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/5049/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/5971 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5971/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5971/comments | https://api.github.com/repos/ollama/ollama/issues/5971/events | https://github.com/ollama/ollama/pull/5971 | 2,431,342,426 | PR_kwDOJ0Z1Ps52iH3p | 5,971 | Add template for llama3.1:70B model | {
"login": "eust-w",
"id": 39115651,
"node_id": "MDQ6VXNlcjM5MTE1NjUx",
"avatar_url": "https://avatars.githubusercontent.com/u/39115651?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/eust-w",
"html_url": "https://github.com/eust-w",
"followers_url": "https://api.github.com/users/eust-w/followers",
"following_url": "https://api.github.com/users/eust-w/following{/other_user}",
"gists_url": "https://api.github.com/users/eust-w/gists{/gist_id}",
"starred_url": "https://api.github.com/users/eust-w/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/eust-w/subscriptions",
"organizations_url": "https://api.github.com/users/eust-w/orgs",
"repos_url": "https://api.github.com/users/eust-w/repos",
"events_url": "https://api.github.com/users/eust-w/events{/privacy}",
"received_events_url": "https://api.github.com/users/eust-w/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2024-07-26T03:36:15 | 2024-08-14T16:39:38 | 2024-08-14T16:39:38 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/5971",
"html_url": "https://github.com/ollama/ollama/pull/5971",
"diff_url": "https://github.com/ollama/ollama/pull/5971.diff",
"patch_url": "https://github.com/ollama/ollama/pull/5971.patch",
"merged_at": null
} | - Added a new template for the llama3.1:70B model in llama3.1-instruct.gotmpl.
- Updated index.json to include the new template configuration.
- Ensured compatibility with the existing llama3-instruct template structure.
This addition provides support for the llama3.1:70B model, allowing for more advanced model instructions and responses.
Resolves: #5967 | {
"login": "mxyng",
"id": 2372640,
"node_id": "MDQ6VXNlcjIzNzI2NDA=",
"avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mxyng",
"html_url": "https://github.com/mxyng",
"followers_url": "https://api.github.com/users/mxyng/followers",
"following_url": "https://api.github.com/users/mxyng/following{/other_user}",
"gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mxyng/subscriptions",
"organizations_url": "https://api.github.com/users/mxyng/orgs",
"repos_url": "https://api.github.com/users/mxyng/repos",
"events_url": "https://api.github.com/users/mxyng/events{/privacy}",
"received_events_url": "https://api.github.com/users/mxyng/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5971/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/5971/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/5923 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5923/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5923/comments | https://api.github.com/repos/ollama/ollama/issues/5923/events | https://github.com/ollama/ollama/issues/5923 | 2,428,324,395 | I_kwDOJ0Z1Ps6QvUor | 5,923 | Slow Model Loading Speed on macOS System | {
"login": "ghost",
"id": 10137,
"node_id": "MDQ6VXNlcjEwMTM3",
"avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ghost",
"html_url": "https://github.com/ghost",
"followers_url": "https://api.github.com/users/ghost/followers",
"following_url": "https://api.github.com/users/ghost/following{/other_user}",
"gists_url": "https://api.github.com/users/ghost/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ghost/subscriptions",
"organizations_url": "https://api.github.com/users/ghost/orgs",
"repos_url": "https://api.github.com/users/ghost/repos",
"events_url": "https://api.github.com/users/ghost/events{/privacy}",
"received_events_url": "https://api.github.com/users/ghost/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | open | false | null | [] | null | 3 | 2024-07-24T19:30:31 | 2024-07-28T10:12:41 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
I am experiencing slow model loading speeds when using Ollama on my macOS system. Here are the specifications of my setup:
macOS Version: 14.5
Processor: M3 Max
Memory: 128GB
Storage: 2TB (with performance on par with the 8TB version)
Ollama version: 0.2.8
Despite having sufficient hardware capabilities, the model loading speed typically hovers around 700MB/s. During the loading process, I do not observe any component (CPU, Disk, Memory, GPU) being fully utilized or experiencing high usage. Could you please help me understand the reason for this bottleneck and suggest any potential solutions or optimizations?
Thank you for your assistance.
### OS
macOS
### GPU
Apple
### CPU
Apple
### Ollama version
0.2.8 | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5923/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 1
} | https://api.github.com/repos/ollama/ollama/issues/5923/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/6140 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/6140/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/6140/comments | https://api.github.com/repos/ollama/ollama/issues/6140/events | https://github.com/ollama/ollama/issues/6140 | 2,444,428,226 | I_kwDOJ0Z1Ps6RswPC | 6,140 | unable to pull model | {
"login": "jdzhang1221",
"id": 29417118,
"node_id": "MDQ6VXNlcjI5NDE3MTE4",
"avatar_url": "https://avatars.githubusercontent.com/u/29417118?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jdzhang1221",
"html_url": "https://github.com/jdzhang1221",
"followers_url": "https://api.github.com/users/jdzhang1221/followers",
"following_url": "https://api.github.com/users/jdzhang1221/following{/other_user}",
"gists_url": "https://api.github.com/users/jdzhang1221/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jdzhang1221/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jdzhang1221/subscriptions",
"organizations_url": "https://api.github.com/users/jdzhang1221/orgs",
"repos_url": "https://api.github.com/users/jdzhang1221/repos",
"events_url": "https://api.github.com/users/jdzhang1221/events{/privacy}",
"received_events_url": "https://api.github.com/users/jdzhang1221/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | 7 | 2024-08-02T08:47:52 | 2024-08-04T06:20:54 | 2024-08-02T14:59:05 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
ollama pull mxbai-embed-large
pulling manifest
Error: pull model manifest: Get "https://registry.ollama.ai/v2/library/mxbai-embed-large/manifests/latest": read tcp 192.168.1.94:50046->104.21.75.227:443: read: connection reset by peer
### OS
macOS
### GPU
Intel
### CPU
Intel
### Ollama version
0.3.2 | {
"login": "jdzhang1221",
"id": 29417118,
"node_id": "MDQ6VXNlcjI5NDE3MTE4",
"avatar_url": "https://avatars.githubusercontent.com/u/29417118?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jdzhang1221",
"html_url": "https://github.com/jdzhang1221",
"followers_url": "https://api.github.com/users/jdzhang1221/followers",
"following_url": "https://api.github.com/users/jdzhang1221/following{/other_user}",
"gists_url": "https://api.github.com/users/jdzhang1221/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jdzhang1221/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jdzhang1221/subscriptions",
"organizations_url": "https://api.github.com/users/jdzhang1221/orgs",
"repos_url": "https://api.github.com/users/jdzhang1221/repos",
"events_url": "https://api.github.com/users/jdzhang1221/events{/privacy}",
"received_events_url": "https://api.github.com/users/jdzhang1221/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/6140/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/6140/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/5412 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5412/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5412/comments | https://api.github.com/repos/ollama/ollama/issues/5412/events | https://github.com/ollama/ollama/pull/5412 | 2,384,417,979 | PR_kwDOJ0Z1Ps50GhSA | 5,412 | Update README.md: Add Ollama-GUI to web & desktop | {
"login": "chyok",
"id": 32629225,
"node_id": "MDQ6VXNlcjMyNjI5MjI1",
"avatar_url": "https://avatars.githubusercontent.com/u/32629225?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/chyok",
"html_url": "https://github.com/chyok",
"followers_url": "https://api.github.com/users/chyok/followers",
"following_url": "https://api.github.com/users/chyok/following{/other_user}",
"gists_url": "https://api.github.com/users/chyok/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chyok/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chyok/subscriptions",
"organizations_url": "https://api.github.com/users/chyok/orgs",
"repos_url": "https://api.github.com/users/chyok/repos",
"events_url": "https://api.github.com/users/chyok/events{/privacy}",
"received_events_url": "https://api.github.com/users/chyok/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2024-07-01T17:51:05 | 2024-11-21T08:19:24 | 2024-11-21T08:19:24 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/5412",
"html_url": "https://github.com/ollama/ollama/pull/5412",
"diff_url": "https://github.com/ollama/ollama/pull/5412.diff",
"patch_url": "https://github.com/ollama/ollama/pull/5412.patch",
"merged_at": "2024-11-21T08:19:24"
} | Hi,
ollama-gui is a very simple client, implemented using the built-in Python tkinter library, with no additional dependencies. It provides the simplest possible visual Ollama interface.
Repository:
https://github.com/chyok/ollama-gui
Screenshots(current version):
![ollama-gui-1 2 0](https://github.com/user-attachments/assets/a4bb979b-68a4-4062-b484-7542f2a866e0)
Hope it can be adopted, thanks | {
"login": "mchiang0610",
"id": 3325447,
"node_id": "MDQ6VXNlcjMzMjU0NDc=",
"avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mchiang0610",
"html_url": "https://github.com/mchiang0610",
"followers_url": "https://api.github.com/users/mchiang0610/followers",
"following_url": "https://api.github.com/users/mchiang0610/following{/other_user}",
"gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions",
"organizations_url": "https://api.github.com/users/mchiang0610/orgs",
"repos_url": "https://api.github.com/users/mchiang0610/repos",
"events_url": "https://api.github.com/users/mchiang0610/events{/privacy}",
"received_events_url": "https://api.github.com/users/mchiang0610/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5412/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/5412/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/4813 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/4813/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/4813/comments | https://api.github.com/repos/ollama/ollama/issues/4813/events | https://github.com/ollama/ollama/issues/4813 | 2,333,483,407 | I_kwDOJ0Z1Ps6LFiGP | 4,813 | Support intel cpu | {
"login": "kannon92",
"id": 3780425,
"node_id": "MDQ6VXNlcjM3ODA0MjU=",
"avatar_url": "https://avatars.githubusercontent.com/u/3780425?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/kannon92",
"html_url": "https://github.com/kannon92",
"followers_url": "https://api.github.com/users/kannon92/followers",
"following_url": "https://api.github.com/users/kannon92/following{/other_user}",
"gists_url": "https://api.github.com/users/kannon92/gists{/gist_id}",
"starred_url": "https://api.github.com/users/kannon92/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/kannon92/subscriptions",
"organizations_url": "https://api.github.com/users/kannon92/orgs",
"repos_url": "https://api.github.com/users/kannon92/repos",
"events_url": "https://api.github.com/users/kannon92/events{/privacy}",
"received_events_url": "https://api.github.com/users/kannon92/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396200,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request",
"name": "feature request",
"color": "a2eeef",
"default": false,
"description": "New feature or request"
}
] | open | false | null | [] | null | 2 | 2024-06-04T13:03:20 | 2024-06-04T20:06:14 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | https://github.com/ollama/ollama/pull/3897
It would be good to document how to support Intel CPUs (using both the Intel compilers and MKL). My PR demonstrates how to compile with the Intel toolchain, but I was told that we should move this to a feature request.
I'm happy to help if I get some guidance on how this should go.
"url": "https://api.github.com/repos/ollama/ollama/issues/4813/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/4813/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/8503 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/8503/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/8503/comments | https://api.github.com/repos/ollama/ollama/issues/8503/events | https://github.com/ollama/ollama/issues/8503 | 2,799,547,686 | I_kwDOJ0Z1Ps6m3bUm | 8,503 | Cannot overcome Ollama error : ollama._types.ResponseError: POST predict: Post "http://127.0.0.1:35843/completion": EOF / panic: failed to decode batch: could not find a kv cache slot | {
"login": "user-33948",
"id": 193742694,
"node_id": "U_kgDOC4xHZg",
"avatar_url": "https://avatars.githubusercontent.com/u/193742694?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/user-33948",
"html_url": "https://github.com/user-33948",
"followers_url": "https://api.github.com/users/user-33948/followers",
"following_url": "https://api.github.com/users/user-33948/following{/other_user}",
"gists_url": "https://api.github.com/users/user-33948/gists{/gist_id}",
"starred_url": "https://api.github.com/users/user-33948/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/user-33948/subscriptions",
"organizations_url": "https://api.github.com/users/user-33948/orgs",
"repos_url": "https://api.github.com/users/user-33948/repos",
"events_url": "https://api.github.com/users/user-33948/events{/privacy}",
"received_events_url": "https://api.github.com/users/user-33948/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | open | false | null | [] | null | 7 | 2025-01-20T15:18:43 | 2025-01-27T09:57:17 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
I believe there is a bug in Ollama's processing that produces the following two errors:
(result from running python file:) Ollama error : ollama._types.ResponseError: POST predict: Post "http://127.0.0.1:35843/completion": EOF
(error in ollama logs:) panic: failed to decode batch: could not find a kv cache slot
To recreate: I am using Ollama as my LLM to create a property graph in LlamaIndex, reproducible by following the code in this documentation: https://docs.llamaindex.ai/en/stable/examples/property_graph/property_graph_advanced/
Shortly after the code begins to run, it flags the above errors with Ollama. I confirmed with LlamaIndex that the problem is with Ollama, not LlamaIndex (https://github.com/run-llama/llama_index/issues/17424). The LlamaIndex team linked my issue to (https://github.com/ollama/ollama/issues/7949).
I have OLLAMA_DEBUG set; below is the error flagged by the Python script, followed by the Ollama logs. I would appreciate your support in fixing this issue. I am currently running Ollama v0.5.1, but have also tried 0.3.14 per other comments on GitHub and get the same error.
Here is the error from the Python script:
Traceback (most recent call last):
File "/home/LlamaIndexTutorials/main.py", line 150, in <module>
index = PropertyGraphIndex.from_documents(
File "/home/.local/lib/python3.10/site-packages/llama_index/core/indices/base.py", line 119, in from_documents
return cls(
File "/home/.local/lib/python3.10/site-packages/llama_index/core/indices/property_graph/base.py", line 134, in __init__
super().__init__(
File "/home/.local/lib/python3.10/site-packages/llama_index/core/indices/base.py", line 77, in __init__
index_struct = self.build_index_from_nodes(
File "/home/.local/lib/python3.10/site-packages/llama_index/core/indices/base.py", line 185, in build_index_from_nodes
return self._build_index_from_nodes(nodes, **build_kwargs)
File "/home/.local/lib/python3.10/site-packages/llama_index/core/indices/property_graph/base.py", line 334, in _build_index_from_nodes
nodes = self._insert_nodes(nodes or [])
File "/home/.local/lib/python3.10/site-packages/llama_index/core/indices/property_graph/base.py", line 200, in _insert_nodes
nodes = asyncio.run(
File "/home/.local/lib/python3.10/site-packages/nest_asyncio.py", line 30, in run
return loop.run_until_complete(task)
File "/home/.local/lib/python3.10/site-packages/nest_asyncio.py", line 98, in run_until_complete
return f.result()
File "/usr/lib/python3.10/asyncio/futures.py", line 201, in result
raise self._exception.with_traceback(self._exception_tb)
File "/usr/lib/python3.10/asyncio/tasks.py", line 232, in __step
result = coro.send(None)
File "/home/.local/lib/python3.10/site-packages/llama_index/core/ingestion/pipeline.py", line 137, in arun_transformations
nodes = await transform.acall(nodes, **kwargs)
File "/home/.local/lib/python3.10/site-packages/llama_index/core/indices/property_graph/transformations/schema_llm.py", line 380, in acall
return await run_jobs(
File "/home/.local/lib/python3.10/site-packages/llama_index/core/instrumentation/dispatcher.py", line 367, in async_wrapper
result = await func(*args, **kwargs)
File "/home/.local/lib/python3.10/site-packages/llama_index/core/async_utils.py", line 146, in run_jobs
results = await tqdm_asyncio.gather(*pool_jobs, desc=desc)
File "/home/.local/lib/python3.10/site-packages/tqdm/asyncio.py", line 79, in gather
res = [await f for f in cls.as_completed(ifs, loop=loop, timeout=timeout,
File "/home/.local/lib/python3.10/site-packages/tqdm/asyncio.py", line 79, in <listcomp>
res = [await f for f in cls.as_completed(ifs, loop=loop, timeout=timeout,
File "/usr/lib/python3.10/asyncio/tasks.py", line 571, in _wait_for_one
return f.result() # May raise f.exception().
File "/usr/lib/python3.10/asyncio/futures.py", line 201, in result
raise self._exception.with_traceback(self._exception_tb)
File "/usr/lib/python3.10/asyncio/tasks.py", line 232, in __step
result = coro.send(None)
File "/home/.local/lib/python3.10/site-packages/tqdm/asyncio.py", line 76, in wrap_awaitable
return i, await f
File "/home/.local/lib/python3.10/site-packages/llama_index/core/instrumentation/dispatcher.py", line 367, in async_wrapper
result = await func(*args, **kwargs)
File "/home/.local/lib/python3.10/site-packages/llama_index/core/async_utils.py", line 139, in worker
return await job
File "/home/.local/lib/python3.10/site-packages/llama_index/core/indices/property_graph/transformations/schema_llm.py", line 344, in _aextract
kg_schema = await self.llm.astructured_predict(
File "/home/.local/lib/python3.10/site-packages/llama_index/core/instrumentation/dispatcher.py", line 367, in async_wrapper
result = await func(*args, **kwargs)
File "/home/.local/lib/python3.10/site-packages/llama_index/llms/ollama/base.py", line 521, in astructured_predict
response = await self.achat(messages, **llm_kwargs)
File "/home/.local/lib/python3.10/site-packages/llama_index/core/instrumentation/dispatcher.py", line 367, in async_wrapper
result = await func(*args, **kwargs)
File "/home/.local/lib/python3.10/site-packages/llama_index/core/llms/callbacks.py", line 75, in wrapped_async_llm_chat
f_return_val = await f(_self, messages, **kwargs)
File "/home/.local/lib/python3.10/site-packages/llama_index/llms/ollama/base.py", line 435, in achat
response = await self.async_client.chat(
File "/home/.local/lib/python3.10/site-packages/ollama/_client.py", line 834, in chat
return await self._request(
File "/home/.local/lib/python3.10/site-packages/ollama/_client.py", line 679, in _request
return cls(**(await self._request_raw(*args, **kwargs)).json())
File "/home/.local/lib/python3.10/site-packages/ollama/_client.py", line 624, in _request_raw
raise ResponseError(e.response.text, e.response.status_code) from None
ollama._types.ResponseError: POST predict: Post "http://127.0.0.1:35843/completion": EOF
Here is the Ollama log:
Jan 20 09:55:34 ollama[174]: time=2025-01-20T09:55:34.271-05:00 level=INFO source=.:0 msg="Server listening on 127.0.0.1:35843"
Jan 20 09:55:34 ollama[174]: llama_model_loader: loaded meta data with 29 key-value pairs and 291 tensors from /usr/share/ollama/.ollama/models/blobs/sha256-87048bcd55216712ef14c11c2c303728463207b165bf18440b9b84b07ec00f87 (version GGUF V3 (latest))
Jan 20 09:55:34 ollama[174]: llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 0: general.architecture str = llama
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 1: general.type str = model
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 2: general.name str = Meta Llama 3.1 8B Instruct
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 3: general.finetune str = Instruct
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 4: general.basename str = Meta-Llama-3.1
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 5: general.size_label str = 8B
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 6: general.license str = llama3.1
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 7: general.tags arr[str,6] = ["facebook", "meta", "pytorch", "llam...
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 8: general.languages arr[str,8] = ["en", "de", "fr", "it", "pt", "hi", ...
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 9: llama.block_count u32 = 32
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 10: llama.context_length u32 = 131072
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 11: llama.embedding_length u32 = 4096
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 12: llama.feed_forward_length u32 = 14336
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 13: llama.attention.head_count u32 = 32
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 14: llama.attention.head_count_kv u32 = 8
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 15: llama.rope.freq_base f32 = 500000.000000
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 16: llama.attention.layer_norm_rms_epsilon f32 = 0.000010
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 17: general.file_type u32 = 2
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 18: llama.vocab_size u32 = 128256
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 19: llama.rope.dimension_count u32 = 128
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 20: tokenizer.ggml.model str = gpt2
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 21: tokenizer.ggml.pre str = llama-bpe
Jan 20 09:55:34 ollama[174]: time=2025-01-20T09:55:34.325-05:00 level=INFO source=server.go:610 msg="waiting for server to become available" status="llm server loading model"
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 22: tokenizer.ggml.tokens arr[str,128256] = ["!", "\"", "#", "$", "%", "&", "'", ...
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 23: tokenizer.ggml.token_type arr[i32,128256] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 24: tokenizer.ggml.merges arr[str,280147] = ["Ġ Ġ", "Ġ ĠĠĠ", "ĠĠ ĠĠ", "...
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 25: tokenizer.ggml.bos_token_id u32 = 128000
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 26: tokenizer.ggml.eos_token_id u32 = 128009
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 27: tokenizer.chat_template str = {% set loop_messages = messages %}{% ...
Jan 20 09:55:34 ollama[174]: llama_model_loader: - kv 28: general.quantization_version u32 = 2
Jan 20 09:55:34 ollama[174]: llama_model_loader: - type f32: 65 tensors
Jan 20 09:55:34 ollama[174]: llama_model_loader: - type q4_0: 225 tensors
Jan 20 09:55:34 ollama[174]: llama_model_loader: - type q6_K: 1 tensors
Jan 20 09:55:34 ollama[174]: llm_load_vocab: special tokens cache size = 256
Jan 20 09:55:34 ollama[174]: llm_load_vocab: token to piece cache size = 0.7999 MB
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: format = GGUF V3 (latest)
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: arch = llama
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: vocab type = BPE
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_vocab = 128256
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_merges = 280147
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: vocab_only = 0
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_ctx_train = 131072
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_embd = 4096
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_layer = 32
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_head = 32
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_head_kv = 8
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_rot = 128
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_swa = 0
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_embd_head_k = 128
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_embd_head_v = 128
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_gqa = 4
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_embd_k_gqa = 1024
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_embd_v_gqa = 1024
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: f_norm_eps = 0.0e+00
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: f_norm_rms_eps = 1.0e-05
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: f_clamp_kqv = 0.0e+00
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: f_max_alibi_bias = 0.0e+00
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: f_logit_scale = 0.0e+00
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_ff = 14336
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_expert = 0
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_expert_used = 0
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: causal attn = 1
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: pooling type = 0
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: rope type = 0
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: rope scaling = linear
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: freq_base_train = 500000.0
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: freq_scale_train = 1
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: n_ctx_orig_yarn = 131072
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: rope_finetuned = unknown
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: ssm_d_conv = 0
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: ssm_d_inner = 0
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: ssm_d_state = 0
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: ssm_dt_rank = 0
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: ssm_dt_b_c_rms = 0
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: model type = 8B
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: model ftype = Q4_0
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: model params = 8.03 B
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: model size = 4.33 GiB (4.64 BPW)
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: general.name = Meta Llama 3.1 8B Instruct
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: BOS token = 128000 '<|begin_of_text|>'
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: EOS token = 128009 '<|eot_id|>'
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: LF token = 128 'Ä'
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: EOT token = 128009 '<|eot_id|>'
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: EOM token = 128008 '<|eom_id|>'
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: EOG token = 128008 '<|eom_id|>'
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: EOG token = 128009 '<|eot_id|>'
Jan 20 09:55:34 ollama[174]: llm_load_print_meta: max token length = 256
Jan 20 09:55:34 ollama[174]: ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
Jan 20 09:55:34 ollama[174]: ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
Jan 20 09:55:34 ollama[174]: ggml_cuda_init: found 1 CUDA devices:
Jan 20 09:55:34 ollama[174]: Device 0: NVIDIA GeForce RTX 4090 Laptop GPU, compute capability 8.9, VMM: yes
Jan 20 09:55:35 ollama[174]: llm_load_tensors: ggml ctx size = 0.27 MiB
Jan 20 09:55:39 ollama[174]: llm_load_tensors: offloading 32 repeating layers to GPU
Jan 20 09:55:39 ollama[174]: llm_load_tensors: offloading non-repeating layers to GPU
Jan 20 09:55:39 ollama[174]: llm_load_tensors: offloaded 33/33 layers to GPU
Jan 20 09:55:39 ollama[174]: llm_load_tensors: CPU buffer size = 281.81 MiB
Jan 20 09:55:39 ollama[174]: llm_load_tensors: CUDA0 buffer size = 4155.99 MiB
Jan 20 09:55:40 ollama[174]: llama_new_context_with_model: n_ctx = 15616
Jan 20 09:55:40 ollama[174]: llama_new_context_with_model: n_batch = 2048
Jan 20 09:55:40 ollama[174]: llama_new_context_with_model: n_ubatch = 512
Jan 20 09:55:40 ollama[174]: llama_new_context_with_model: flash_attn = 0
Jan 20 09:55:40 ollama[174]: llama_new_context_with_model: freq_base = 500000.0
Jan 20 09:55:40 ollama[174]: llama_new_context_with_model: freq_scale = 1
Jan 20 09:55:40 ollama[174]: llama_kv_cache_init: CUDA0 KV buffer size = 1952.00 MiB
Jan 20 09:55:40 ollama[174]: llama_new_context_with_model: KV self size = 1952.00 MiB, K (f16): 976.00 MiB, V (f16): 976.00 MiB
Jan 20 09:55:40 ollama[174]: llama_new_context_with_model: CUDA_Host output buffer size = 2.02 MiB
Jan 20 09:55:40 ollama[174]: llama_new_context_with_model: CUDA0 compute buffer size = 1038.50 MiB
Jan 20 09:55:40 ollama[174]: llama_new_context_with_model: CUDA_Host compute buffer size = 38.51 MiB
Jan 20 09:55:40 ollama[174]: llama_new_context_with_model: graph nodes = 1030
Jan 20 09:55:40 ollama[174]: llama_new_context_with_model: graph splits = 2
Jan 20 09:55:40 ollama[174]: time=2025-01-20T09:55:40.352-05:00 level=INFO source=server.go:615 msg="llama runner started in 6.28 seconds"
Jan 20 09:56:01 ollama[174]: [GIN] 2025/01/20 - 09:56:01 | 200 | 27.323051198s | 127.0.0.1 | POST "/api/chat"
Jan 20 09:56:16 ollama[174]: [GIN] 2025/01/20 - 09:56:16 | 200 | 42.63900776s | 127.0.0.1 | POST "/api/chat"
Jan 20 09:56:20 ollama[174]: [GIN] 2025/01/20 - 09:56:20 | 200 | 47.105286485s | 127.0.0.1 | POST "/api/chat"
Jan 20 09:56:23 ollama[174]: [GIN] 2025/01/20 - 09:56:23 | 200 | 49.422469993s | 127.0.0.1 | POST "/api/chat"
Jan 20 09:57:02 ollama[174]: [GIN] 2025/01/20 - 09:57:02 | 200 | 1m1s | 127.0.0.1 | POST "/api/chat"
Jan 20 09:57:07 ollama[174]: [GIN] 2025/01/20 - 09:57:07 | 200 | 44.852374838s | 127.0.0.1 | POST "/api/chat"
Jan 20 09:57:10 ollama[174]: [GIN] 2025/01/20 - 09:57:10 | 200 | 54.031440642s | 127.0.0.1 | POST "/api/chat"
Jan 20 09:57:17 ollama[174]: [GIN] 2025/01/20 - 09:57:17 | 200 | 7.628685854s | 127.0.0.1 | POST "/api/chat"
Jan 20 09:57:54 ollama[174]: [GIN] 2025/01/20 - 09:57:54 | 200 | 51.377588763s | 127.0.0.1 | POST "/api/chat"
Jan 20 09:57:58 ollama[174]: [GIN] 2025/01/20 - 09:57:58 | 200 | 50.995044466s | 127.0.0.1 | POST "/api/chat"
Jan 20 09:58:05 ollama[174]: [GIN] 2025/01/20 - 09:58:05 | 200 | 47.414462265s | 127.0.0.1 | POST "/api/chat"
Jan 20 09:58:45 ollama[174]: [GIN] 2025/01/20 - 09:58:45 | 200 | 51.214899981s | 127.0.0.1 | POST "/api/chat"
Jan 20 09:59:12 ollama[174]: [GIN] 2025/01/20 - 09:59:12 | 200 | 1m13s | 127.0.0.1 | POST "/api/chat"
Jan 20 09:59:18 ollama[174]: [GIN] 2025/01/20 - 09:59:18 | 200 | 1m13s | 127.0.0.1 | POST "/api/chat"
Jan 20 09:59:37 ollama[174]: [GIN] 2025/01/20 - 09:59:37 | 200 | 51.817729012s | 127.0.0.1 | POST "/api/chat"
Jan 20 10:00:11 ollama[174]: [GIN] 2025/01/20 - 10:00:11 | 200 | 59.507936464s | 127.0.0.1 | POST "/api/chat"
Jan 20 10:00:22 ollama[174]: [GIN] 2025/01/20 - 10:00:22 | 200 | 1m3s | 127.0.0.1 | POST "/api/chat"
Jan 20 10:00:52 ollama[174]: [GIN] 2025/01/20 - 10:00:52 | 200 | 1m15s | 127.0.0.1 | POST "/api/chat"
Jan 20 10:01:05 ollama[174]: [GIN] 2025/01/20 - 10:01:05 | 200 | 42.657578432s | 127.0.0.1 | POST "/api/chat"
Jan 20 10:01:10 ollama[174]: [GIN] 2025/01/20 - 10:01:10 | 200 | 58.529013717s | 127.0.0.1 | POST "/api/chat"
Jan 20 10:01:16 ollama[174]: [GIN] 2025/01/20 - 10:01:16 | 200 | 23.427177508s | 127.0.0.1 | POST "/api/chat"
Jan 20 10:01:23 ollama[174]: [GIN] 2025/01/20 - 10:01:23 | 200 | 17.708491186s | 127.0.0.1 | POST "/api/chat"
Jan 20 10:01:32 ollama[174]: [GIN] 2025/01/20 - 10:01:32 | 200 | 22.494067591s | 127.0.0.1 | POST "/api/chat"
Jan 20 10:01:37 ollama[174]: [GIN] 2025/01/20 - 10:01:37 | 200 | 21.82285361s | 127.0.0.1 | POST "/api/chat"
Jan 20 10:01:41 ollama[174]: [GIN] 2025/01/20 - 10:01:41 | 200 | 18.875235561s | 127.0.0.1 | POST "/api/chat"
Jan 20 10:01:50 ollama[174]: [GIN] 2025/01/20 - 10:01:50 | 200 | 17.608728526s | 127.0.0.1 | POST "/api/chat"
Jan 20 10:01:51 ollama[174]: panic: failed to decode batch: could not find a kv cache slot
Jan 20 10:01:51 ollama[174]: goroutine 7 [running]:
Jan 20 10:01:51 ollama[174]: main.(*Server).run(0xc0000ec120, {0x55e6900a79a0, 0xc0000c20a0})
Jan 20 10:01:51 ollama[174]: github.com/ollama/ollama/llama/runner/runner.go:344 +0x23e
Jan 20 10:01:51 ollama[174]: created by main.main in goroutine 1
Jan 20 10:01:51 ollama[174]: github.com/ollama/ollama/llama/runner/runner.go:980 +0xd3e
Jan 20 10:01:51 ollama[174]: [GIN] 2025/01/20 - 10:01:51 | 500 | 753.826293ms | 127.0.0.1 | POST "/api/chat"
Jan 20 10:01:51 ollama[174]: [GIN] 2025/01/20 - 10:01:51 | 500 | 5m30s | 127.0.0.1 | POST "/api/chat"
### OS
Linux
### GPU
Nvidia
### CPU
Intel
### Ollama version
0.5.1 | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/8503/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/8503/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/4147 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/4147/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/4147/comments | https://api.github.com/repos/ollama/ollama/issues/4147/events | https://github.com/ollama/ollama/pull/4147 | 2,278,707,111 | PR_kwDOJ0Z1Ps5uhx48 | 4,147 | Adding '/website' to serve up static files under a directory defined with the env OLLAMA_WEBSITE | {
"login": "1feralcat",
"id": 51179976,
"node_id": "MDQ6VXNlcjUxMTc5OTc2",
"avatar_url": "https://avatars.githubusercontent.com/u/51179976?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/1feralcat",
"html_url": "https://github.com/1feralcat",
"followers_url": "https://api.github.com/users/1feralcat/followers",
"following_url": "https://api.github.com/users/1feralcat/following{/other_user}",
"gists_url": "https://api.github.com/users/1feralcat/gists{/gist_id}",
"starred_url": "https://api.github.com/users/1feralcat/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/1feralcat/subscriptions",
"organizations_url": "https://api.github.com/users/1feralcat/orgs",
"repos_url": "https://api.github.com/users/1feralcat/repos",
"events_url": "https://api.github.com/users/1feralcat/events{/privacy}",
"received_events_url": "https://api.github.com/users/1feralcat/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2024-05-04T03:38:07 | 2024-05-04T04:27:42 | 2024-05-04T04:27:42 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/4147",
"html_url": "https://github.com/ollama/ollama/pull/4147",
"diff_url": "https://github.com/ollama/ollama/pull/4147.diff",
"patch_url": "https://github.com/ollama/ollama/pull/4147.patch",
"merged_at": null
} | null | {
"login": "1feralcat",
"id": 51179976,
"node_id": "MDQ6VXNlcjUxMTc5OTc2",
"avatar_url": "https://avatars.githubusercontent.com/u/51179976?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/1feralcat",
"html_url": "https://github.com/1feralcat",
"followers_url": "https://api.github.com/users/1feralcat/followers",
"following_url": "https://api.github.com/users/1feralcat/following{/other_user}",
"gists_url": "https://api.github.com/users/1feralcat/gists{/gist_id}",
"starred_url": "https://api.github.com/users/1feralcat/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/1feralcat/subscriptions",
"organizations_url": "https://api.github.com/users/1feralcat/orgs",
"repos_url": "https://api.github.com/users/1feralcat/repos",
"events_url": "https://api.github.com/users/1feralcat/events{/privacy}",
"received_events_url": "https://api.github.com/users/1feralcat/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/4147/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/4147/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/7035 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/7035/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/7035/comments | https://api.github.com/repos/ollama/ollama/issues/7035/events | https://github.com/ollama/ollama/issues/7035 | 2,554,953,798 | I_kwDOJ0Z1Ps6YSYBG | 7,035 | Support AMD GPUs via WSL | {
"login": "vignessh",
"id": 1451706,
"node_id": "MDQ6VXNlcjE0NTE3MDY=",
"avatar_url": "https://avatars.githubusercontent.com/u/1451706?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/vignessh",
"html_url": "https://github.com/vignessh",
"followers_url": "https://api.github.com/users/vignessh/followers",
"following_url": "https://api.github.com/users/vignessh/following{/other_user}",
"gists_url": "https://api.github.com/users/vignessh/gists{/gist_id}",
"starred_url": "https://api.github.com/users/vignessh/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/vignessh/subscriptions",
"organizations_url": "https://api.github.com/users/vignessh/orgs",
"repos_url": "https://api.github.com/users/vignessh/repos",
"events_url": "https://api.github.com/users/vignessh/events{/privacy}",
"received_events_url": "https://api.github.com/users/vignessh/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396200,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request",
"name": "feature request",
"color": "a2eeef",
"default": false,
"description": "New feature or request"
},
{
"id": 5860134234,
"node_id": "LA_kwDOJ0Z1Ps8AAAABXUqNWg",
"url": "https://api.github.com/repos/ollama/ollama/labels/windows",
"name": "windows",
"color": "0052CC",
"default": false,
"description": ""
},
{
"id": 6433346500,
"node_id": "LA_kwDOJ0Z1Ps8AAAABf3UTxA",
"url": "https://api.github.com/repos/ollama/ollama/labels/amd",
"name": "amd",
"color": "000000",
"default": false,
"description": "Issues relating to AMD GPUs and ROCm"
},
{
"id": 6677675697,
"node_id": "LA_kwDOJ0Z1Ps8AAAABjgU-sQ",
"url": "https://api.github.com/repos/ollama/ollama/labels/wsl",
"name": "wsl",
"color": "7E0821",
"default": false,
"description": "Issues using WSL"
}
] | closed | false | null | [] | null | 1 | 2024-09-29T14:07:29 | 2024-09-30T16:57:47 | 2024-09-30T16:57:32 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | Hello,
I'm running a Windows 11 workstation with an AMD RX 7900XTX GPU. I installed the latest Ollama for Windows, and with that I can see the GPU being used for queries. I also tried the Linux install under WSL following [this](https://community.amd.com/t5/ai/running-llms-locally-on-amd-gpus-with-ollama/ba-p/713266) guide. With WSL, however, even after installing ROCm as mentioned in the guide, Ollama does not make use of the GPU. I tried following [this](https://github.com/ollama/ollama/issues/5275) issue to build Ollama locally with the fix mentioned, but no success there either. The Ollama server keeps crashing when I attempt to run any model like `llama2` or `llama3.2`.
Could someone please help with this? I really don't want to switch to an Nvidia GPU just for this purpose.
Thanks,
Vignessh | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/7035/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/7035/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/7469 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/7469/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/7469/comments | https://api.github.com/repos/ollama/ollama/issues/7469/events | https://github.com/ollama/ollama/pull/7469 | 2,630,255,833 | PR_kwDOJ0Z1Ps6AsIcO | 7,469 | Fix unsafe.Slice error with mllama | {
"login": "jessegross",
"id": 6468499,
"node_id": "MDQ6VXNlcjY0Njg0OTk=",
"avatar_url": "https://avatars.githubusercontent.com/u/6468499?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jessegross",
"html_url": "https://github.com/jessegross",
"followers_url": "https://api.github.com/users/jessegross/followers",
"following_url": "https://api.github.com/users/jessegross/following{/other_user}",
"gists_url": "https://api.github.com/users/jessegross/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jessegross/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jessegross/subscriptions",
"organizations_url": "https://api.github.com/users/jessegross/orgs",
"repos_url": "https://api.github.com/users/jessegross/repos",
"events_url": "https://api.github.com/users/jessegross/events{/privacy}",
"received_events_url": "https://api.github.com/users/jessegross/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2024-11-02T06:22:05 | 2024-11-02T23:04:49 | 2024-11-02T20:37:56 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/7469",
"html_url": "https://github.com/ollama/ollama/pull/7469",
"diff_url": "https://github.com/ollama/ollama/pull/7469.diff",
"patch_url": "https://github.com/ollama/ollama/pull/7469.patch",
"merged_at": "2024-11-02T20:37:55"
} | Fix the error and also improve error handling for the llama.cpp CGo layer. | {
"login": "jessegross",
"id": 6468499,
"node_id": "MDQ6VXNlcjY0Njg0OTk=",
"avatar_url": "https://avatars.githubusercontent.com/u/6468499?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jessegross",
"html_url": "https://github.com/jessegross",
"followers_url": "https://api.github.com/users/jessegross/followers",
"following_url": "https://api.github.com/users/jessegross/following{/other_user}",
"gists_url": "https://api.github.com/users/jessegross/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jessegross/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jessegross/subscriptions",
"organizations_url": "https://api.github.com/users/jessegross/orgs",
"repos_url": "https://api.github.com/users/jessegross/repos",
"events_url": "https://api.github.com/users/jessegross/events{/privacy}",
"received_events_url": "https://api.github.com/users/jessegross/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/7469/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/7469/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/6142 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/6142/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/6142/comments | https://api.github.com/repos/ollama/ollama/issues/6142/events | https://github.com/ollama/ollama/issues/6142 | 2,444,984,504 | I_kwDOJ0Z1Ps6Ru4C4 | 6,142 | BitDefender false positive when downloading Ollama Windows installer | {
"login": "E-Nyamsuren",
"id": 14015501,
"node_id": "MDQ6VXNlcjE0MDE1NTAx",
"avatar_url": "https://avatars.githubusercontent.com/u/14015501?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/E-Nyamsuren",
"html_url": "https://github.com/E-Nyamsuren",
"followers_url": "https://api.github.com/users/E-Nyamsuren/followers",
"following_url": "https://api.github.com/users/E-Nyamsuren/following{/other_user}",
"gists_url": "https://api.github.com/users/E-Nyamsuren/gists{/gist_id}",
"starred_url": "https://api.github.com/users/E-Nyamsuren/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/E-Nyamsuren/subscriptions",
"organizations_url": "https://api.github.com/users/E-Nyamsuren/orgs",
"repos_url": "https://api.github.com/users/E-Nyamsuren/repos",
"events_url": "https://api.github.com/users/E-Nyamsuren/events{/privacy}",
"received_events_url": "https://api.github.com/users/E-Nyamsuren/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
},
{
"id": 5860134234,
"node_id": "LA_kwDOJ0Z1Ps8AAAABXUqNWg",
"url": "https://api.github.com/repos/ollama/ollama/labels/windows",
"name": "windows",
"color": "0052CC",
"default": false,
"description": ""
}
] | closed | false | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 2 | 2024-08-02T13:37:55 | 2024-08-02T20:44:12 | 2024-08-02T20:44:11 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
BitDefender detects a false positive(?) in the downloaded Windows installer of Ollama.
I also have the Ollama installer for Windows that I downloaded on 27 June 2024; that installer does not have this issue.
**Where**: The current downloadable installer of Ollama on the Ollama website.
**Threat name**: Gen:Suspicious.Cloud.4.@V3@aK5DIadi
**Antivirus**: BitDefender Total Security, Build 27.0.40.174 (latest update)
**OS**: Windows 11
![image](https://github.com/user-attachments/assets/fa1a0b68-0506-4946-b994-e360496037bb)
### OS
Windows
### GPU
Intel
### CPU
Intel
### Ollama version
_No response_ | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/6142/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/6142/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/6168 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/6168/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/6168/comments | https://api.github.com/repos/ollama/ollama/issues/6168/events | https://github.com/ollama/ollama/issues/6168 | 2,447,607,963 | I_kwDOJ0Z1Ps6R44ib | 6,168 | Installation via scoop fails | {
"login": "kawadumax",
"id": 11693767,
"node_id": "MDQ6VXNlcjExNjkzNzY3",
"avatar_url": "https://avatars.githubusercontent.com/u/11693767?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/kawadumax",
"html_url": "https://github.com/kawadumax",
"followers_url": "https://api.github.com/users/kawadumax/followers",
"following_url": "https://api.github.com/users/kawadumax/following{/other_user}",
"gists_url": "https://api.github.com/users/kawadumax/gists{/gist_id}",
"starred_url": "https://api.github.com/users/kawadumax/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/kawadumax/subscriptions",
"organizations_url": "https://api.github.com/users/kawadumax/orgs",
"repos_url": "https://api.github.com/users/kawadumax/repos",
"events_url": "https://api.github.com/users/kawadumax/events{/privacy}",
"received_events_url": "https://api.github.com/users/kawadumax/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396220,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2afA",
"url": "https://api.github.com/repos/ollama/ollama/labels/question",
"name": "question",
"color": "d876e3",
"default": true,
"description": "General questions"
},
{
"id": 5860134234,
"node_id": "LA_kwDOJ0Z1Ps8AAAABXUqNWg",
"url": "https://api.github.com/repos/ollama/ollama/labels/windows",
"name": "windows",
"color": "0052CC",
"default": false,
"description": ""
}
] | closed | false | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 1 | 2024-08-05T05:06:48 | 2024-08-09T20:38:23 | 2024-08-09T20:37:53 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
Related: https://github.com/ScoopInstaller/Main/issues/6074
Error output:
```
Scoop was updated successfully!
Installing 'ollama' (0.3.3) [64bit] from 'main' bucket
OllamaSetup.exe (298.5 MB) [==================================================================================] 100%
Checking hash of OllamaSetup.exe ... ok.
Extracting OllamaSetup.exe ... ERROR Exit code was 1!
Failed to extract files from C:\Users\xialvjun\scoop\apps\ollama\0.3.3\OllamaSetup.exe.
Log file:
~\scoop\apps\ollama\0.3.3\innounp.log
Please try again or create a new issue by using the following link and paste your console output:
https://github.com/ScoopInstaller/Main/issues/new?title=ollama%400.3.3%3a+decompress+error
```
```
Signature detected: Inno Setup Setup Data (6.3.0)
This is not directly supported, but i'll try to unpack it as version 5602
; Version detected: 6300
Critical error: The setup files are corrupted. Please obtain a new copy of the program.
```
### OS
Windows
### GPU
Other
### CPU
Intel
### Ollama version
0.3.3 | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/6168/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/6168/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/3922 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/3922/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/3922/comments | https://api.github.com/repos/ollama/ollama/issues/3922/events | https://github.com/ollama/ollama/issues/3922 | 2,264,551,533 | I_kwDOJ0Z1Ps6G-lBt | 3,922 | JSON list of available models | {
"login": "ricardobalk",
"id": 14904229,
"node_id": "MDQ6VXNlcjE0OTA0MjI5",
"avatar_url": "https://avatars.githubusercontent.com/u/14904229?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ricardobalk",
"html_url": "https://github.com/ricardobalk",
"followers_url": "https://api.github.com/users/ricardobalk/followers",
"following_url": "https://api.github.com/users/ricardobalk/following{/other_user}",
"gists_url": "https://api.github.com/users/ricardobalk/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ricardobalk/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ricardobalk/subscriptions",
"organizations_url": "https://api.github.com/users/ricardobalk/orgs",
"repos_url": "https://api.github.com/users/ricardobalk/repos",
"events_url": "https://api.github.com/users/ricardobalk/events{/privacy}",
"received_events_url": "https://api.github.com/users/ricardobalk/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396200,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request",
"name": "feature request",
"color": "a2eeef",
"default": false,
"description": "New feature or request"
},
{
"id": 6573197867,
"node_id": "LA_kwDOJ0Z1Ps8AAAABh8sKKw",
"url": "https://api.github.com/repos/ollama/ollama/labels/ollama.com",
"name": "ollama.com",
"color": "ffffff",
"default": false,
"description": ""
}
] | closed | false | {
"login": "BruceMacD",
"id": 5853428,
"node_id": "MDQ6VXNlcjU4NTM0Mjg=",
"avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/BruceMacD",
"html_url": "https://github.com/BruceMacD",
"followers_url": "https://api.github.com/users/BruceMacD/followers",
"following_url": "https://api.github.com/users/BruceMacD/following{/other_user}",
"gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}",
"starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions",
"organizations_url": "https://api.github.com/users/BruceMacD/orgs",
"repos_url": "https://api.github.com/users/BruceMacD/repos",
"events_url": "https://api.github.com/users/BruceMacD/events{/privacy}",
"received_events_url": "https://api.github.com/users/BruceMacD/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "BruceMacD",
"id": 5853428,
"node_id": "MDQ6VXNlcjU4NTM0Mjg=",
"avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/BruceMacD",
"html_url": "https://github.com/BruceMacD",
"followers_url": "https://api.github.com/users/BruceMacD/followers",
"following_url": "https://api.github.com/users/BruceMacD/following{/other_user}",
"gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}",
"starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions",
"organizations_url": "https://api.github.com/users/BruceMacD/orgs",
"repos_url": "https://api.github.com/users/BruceMacD/repos",
"events_url": "https://api.github.com/users/BruceMacD/events{/privacy}",
"received_events_url": "https://api.github.com/users/BruceMacD/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 5 | 2024-04-25T21:47:11 | 2025-01-24T08:46:48 | 2024-05-09T22:12:02 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | I saw that there's a list of available models at https://ollama.com/library. Is there a JSON-formatted file for this list? I would like to integrate it into a Python application I'm building.
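In the meantime I'm tempted to just scrape the page — a rough sketch, assuming each model is linked as `/library/<name>` in the HTML (not an official API, so it could break whenever the page changes):
```sh
curl -s https://ollama.com/library \
  | grep -oE 'href="/library/[A-Za-z0-9._-]+"' \
  | sed -E 's|href="/library/([^"]+)"|\1|' \
  | sort -u
```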
**Edit: I forgot to mention that by _file_ I actually mean a _JSON-formatted response on a public endpoint_, so that my Python application can always download an up-to-date list of available models.** | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/3922/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/3922/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/4449 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/4449/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/4449/comments | https://api.github.com/repos/ollama/ollama/issues/4449/events | https://github.com/ollama/ollama/issues/4449 | 2,297,473,248 | I_kwDOJ0Z1Ps6I8Kjg | 4,449 | openai.error.InvalidRequestError: model 'deepseek-coder:6.7b' not found, try pulling it first | {
"login": "userandpass",
"id": 26294920,
"node_id": "MDQ6VXNlcjI2Mjk0OTIw",
"avatar_url": "https://avatars.githubusercontent.com/u/26294920?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/userandpass",
"html_url": "https://github.com/userandpass",
"followers_url": "https://api.github.com/users/userandpass/followers",
"following_url": "https://api.github.com/users/userandpass/following{/other_user}",
"gists_url": "https://api.github.com/users/userandpass/gists{/gist_id}",
"starred_url": "https://api.github.com/users/userandpass/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/userandpass/subscriptions",
"organizations_url": "https://api.github.com/users/userandpass/orgs",
"repos_url": "https://api.github.com/users/userandpass/repos",
"events_url": "https://api.github.com/users/userandpass/events{/privacy}",
"received_events_url": "https://api.github.com/users/userandpass/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | open | false | null | [] | null | 1 | 2024-05-15T10:20:31 | 2024-07-31T17:58:26 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
docker run -d --gpus="device=0" -v ollama:/root/.ollama -p 8010:11434 --name ollama ollama/ollama
docker exec -it ollama ollama run deepseek-coder:6.7b
I got the error in the title when I called the server on port 8010 from another computer.
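For reference, a couple of checks that should narrow this down (the host is a placeholder; `/api/tags` lists the models the server actually has):
```sh
# From the other computer: ask the server what it has pulled.
curl http://<server-ip>:8010/api/tags
# On the server: list models inside the container.
docker exec -it ollama ollama list
```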
### OS
Linux
### GPU
Nvidia
### CPU
_No response_
### Ollama version
_No response_ | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/4449/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/4449/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/6110 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/6110/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/6110/comments | https://api.github.com/repos/ollama/ollama/issues/6110/events | https://github.com/ollama/ollama/pull/6110 | 2,441,153,721 | PR_kwDOJ0Z1Ps53DPgF | 6,110 | llama: Get embeddings working | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2024-08-01T00:04:05 | 2024-08-01T14:59:38 | 2024-08-01T14:59:35 | COLLABORATOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/6110",
"html_url": "https://github.com/ollama/ollama/pull/6110",
"diff_url": "https://github.com/ollama/ollama/pull/6110.diff",
"patch_url": "https://github.com/ollama/ollama/pull/6110.patch",
"merged_at": "2024-08-01T14:59:35"
} | Truncation doesn't pass, but the other embeddings tests pass | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/6110/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/6110/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/6677 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/6677/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/6677/comments | https://api.github.com/repos/ollama/ollama/issues/6677/events | https://github.com/ollama/ollama/issues/6677 | 2,510,405,967 | I_kwDOJ0Z1Ps6VocFP | 6,677 | VG | {
"login": "vioricavg",
"id": 163665189,
"node_id": "U_kgDOCcFVJQ",
"avatar_url": "https://avatars.githubusercontent.com/u/163665189?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/vioricavg",
"html_url": "https://github.com/vioricavg",
"followers_url": "https://api.github.com/users/vioricavg/followers",
"following_url": "https://api.github.com/users/vioricavg/following{/other_user}",
"gists_url": "https://api.github.com/users/vioricavg/gists{/gist_id}",
"starred_url": "https://api.github.com/users/vioricavg/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/vioricavg/subscriptions",
"organizations_url": "https://api.github.com/users/vioricavg/orgs",
"repos_url": "https://api.github.com/users/vioricavg/repos",
"events_url": "https://api.github.com/users/vioricavg/events{/privacy}",
"received_events_url": "https://api.github.com/users/vioricavg/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2024-09-06T13:11:03 | 2024-09-09T18:37:38 | 2024-09-06T21:16:18 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ```python
# Use a pipeline as a high-level helper
from transformers import pipeline

messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe = pipeline("text-generation", model="LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct", trust_remote_code=True)
pipe(messages)
``` | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/6677/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/6677/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/3809 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/3809/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/3809/comments | https://api.github.com/repos/ollama/ollama/issues/3809/events | https://github.com/ollama/ollama/issues/3809 | 2,255,511,041 | I_kwDOJ0Z1Ps6GcF4B | 3,809 | AMD gfx90a unrecognized (seen as gfx9010) | {
"login": "simark",
"id": 1758287,
"node_id": "MDQ6VXNlcjE3NTgyODc=",
"avatar_url": "https://avatars.githubusercontent.com/u/1758287?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/simark",
"html_url": "https://github.com/simark",
"followers_url": "https://api.github.com/users/simark/followers",
"following_url": "https://api.github.com/users/simark/following{/other_user}",
"gists_url": "https://api.github.com/users/simark/gists{/gist_id}",
"starred_url": "https://api.github.com/users/simark/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/simark/subscriptions",
"organizations_url": "https://api.github.com/users/simark/orgs",
"repos_url": "https://api.github.com/users/simark/repos",
"events_url": "https://api.github.com/users/simark/events{/privacy}",
"received_events_url": "https://api.github.com/users/simark/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
},
{
"id": 6433346500,
"node_id": "LA_kwDOJ0Z1Ps8AAAABf3UTxA",
"url": "https://api.github.com/repos/ollama/ollama/labels/amd",
"name": "amd",
"color": "000000",
"default": false,
"description": "Issues relating to AMD GPUs and ROCm"
}
] | closed | false | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 0 | 2024-04-22T03:42:52 | 2024-04-24T18:07:50 | 2024-04-24T18:07:50 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
I have a system with 3 AMD cards, a gfx900, a gfx906 and a gfx90a. When I launch `ollama serve`, I see:
time=2024-04-21T23:32:10.765-04:00 level=INFO source=amd_linux.go:121 msg="amdgpu [3] gfx900 is supported"
time=2024-04-21T23:32:10.765-04:00 level=INFO source=amd_linux.go:121 msg="amdgpu [4] gfx906 is supported"
time=2024-04-21T23:32:10.765-04:00 level=WARN source=amd_linux.go:116 msg="amdgpu [5] gfx9010 is not supported by /tmp/ollama2577336771/rocm [gfx1030 gfx1100 gfx1101 gfx1102 gfx900 gfx906 gfx908 gfx90a gfx940 gfx941 gfx942]"
The `gfx9010` looks fishy. I have the feeling that the last `%d` [here](https://github.com/ollama/ollama/blob/62be2050dd83197864d771fe6891fc47486ee6a1/gpu/amd_linux.go#L450) should be a `%x`.
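A quick shell demonstration of the difference, treating gfx90a as major 9, minor 0, stepping 0xa (my reading of how the ID decomposes):
```sh
printf 'gfx%d%d%d\n' 9 0 10   # -> gfx9010 (stepping printed in decimal, as in the log)
printf 'gfx%d%d%x\n' 9 0 10   # -> gfx90a  (stepping printed in hex, as expected)
```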
### OS
Linux
### GPU
AMD
### CPU
AMD
### Ollama version
0.1.32 | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/3809/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/3809/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/2508 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/2508/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/2508/comments | https://api.github.com/repos/ollama/ollama/issues/2508/events | https://github.com/ollama/ollama/issues/2508 | 2,135,638,897 | I_kwDOJ0Z1Ps5_S0Nx | 2,508 | OLLAMA_KEEP_ALIVE ENV feature | {
"login": "uxfion",
"id": 44778029,
"node_id": "MDQ6VXNlcjQ0Nzc4MDI5",
"avatar_url": "https://avatars.githubusercontent.com/u/44778029?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/uxfion",
"html_url": "https://github.com/uxfion",
"followers_url": "https://api.github.com/users/uxfion/followers",
"following_url": "https://api.github.com/users/uxfion/following{/other_user}",
"gists_url": "https://api.github.com/users/uxfion/gists{/gist_id}",
"starred_url": "https://api.github.com/users/uxfion/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/uxfion/subscriptions",
"organizations_url": "https://api.github.com/users/uxfion/orgs",
"repos_url": "https://api.github.com/users/uxfion/repos",
"events_url": "https://api.github.com/users/uxfion/events{/privacy}",
"received_events_url": "https://api.github.com/users/uxfion/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396200,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request",
"name": "feature request",
"color": "a2eeef",
"default": false,
"description": "New feature or request"
}
] | closed | false | null | [] | null | 2 | 2024-02-15T04:53:03 | 2024-03-13T20:29:41 | 2024-03-13T20:29:41 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | Does anyone know how to set `keep_alive` in the openai API? It seems that this feature is not supported in the openai API.
It would be better if we could set `OLLAMA_KEEP_ALIVE` in the environment variables, since the `/v1/chat/completions` endpoint is difficult to support customized parameters.
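Something like this is what I have in mind — hypothetical until it is implemented:
```sh
# Proposed: a server-wide default, so /v1/chat/completions requests
# don't each need a keep_alive parameter.
OLLAMA_KEEP_ALIVE=24h ollama serve
```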
https://github.com/ollama/ollama/pull/2146#issue-2094810743 | {
"login": "pdevine",
"id": 75239,
"node_id": "MDQ6VXNlcjc1MjM5",
"avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/pdevine",
"html_url": "https://github.com/pdevine",
"followers_url": "https://api.github.com/users/pdevine/followers",
"following_url": "https://api.github.com/users/pdevine/following{/other_user}",
"gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}",
"starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/pdevine/subscriptions",
"organizations_url": "https://api.github.com/users/pdevine/orgs",
"repos_url": "https://api.github.com/users/pdevine/repos",
"events_url": "https://api.github.com/users/pdevine/events{/privacy}",
"received_events_url": "https://api.github.com/users/pdevine/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/2508/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 1
} | https://api.github.com/repos/ollama/ollama/issues/2508/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/8576 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/8576/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/8576/comments | https://api.github.com/repos/ollama/ollama/issues/8576/events | https://github.com/ollama/ollama/issues/8576 | 2,810,803,952 | I_kwDOJ0Z1Ps6niXbw | 8,576 | How to save chat history / conversations to a file when running ollama models from terminal? | {
"login": "dimyself",
"id": 36783626,
"node_id": "MDQ6VXNlcjM2NzgzNjI2",
"avatar_url": "https://avatars.githubusercontent.com/u/36783626?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dimyself",
"html_url": "https://github.com/dimyself",
"followers_url": "https://api.github.com/users/dimyself/followers",
"following_url": "https://api.github.com/users/dimyself/following{/other_user}",
"gists_url": "https://api.github.com/users/dimyself/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dimyself/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dimyself/subscriptions",
"organizations_url": "https://api.github.com/users/dimyself/orgs",
"repos_url": "https://api.github.com/users/dimyself/repos",
"events_url": "https://api.github.com/users/dimyself/events{/privacy}",
"received_events_url": "https://api.github.com/users/dimyself/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | open | false | null | [] | null | 1 | 2025-01-25T06:29:28 | 2025-01-25T08:43:26 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | Hello! I feel like this is a stupid question, but i can't find the answer. I don't really see documentation on this.
When I run ollama models on linux in terminal, is there a way to save the chat / conversation to a file??
I tried /save and that didn't work. I checked .ollama/history, but that is only ollama prompts.
Where are the actual chats saved? Or how to save them?
I don't want to manually copy/Paste the chat to a text editor
Is this the only way:
`ollama --command "your_command" > ollama_chat_history.log`
??
It seems there should be a config setting to save to a specified file.
If I'm at an ollama prompt (>>>) and ask it a question, it should auto-save the conversation.
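The closest workarounds I can think of, assuming standard Unix tools (interactive mode itself still seems to have no save option):
```sh
# One-shot prompt: append the answer to a log while it streams.
ollama run llama3.2 "Why is the sky blue?" | tee -a chat.log
# Interactive session: record the whole terminal transcript.
script -a -c "ollama run llama3.2" chat.log
```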
Thanks! | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/8576/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/8576/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/4397 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/4397/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/4397/comments | https://api.github.com/repos/ollama/ollama/issues/4397/events | https://github.com/ollama/ollama/issues/4397 | 2,292,380,803 | I_kwDOJ0Z1Ps6IovSD | 4,397 | how to keep system prompt permanently after setting SYSTEM | {
"login": "taozhiyuai",
"id": 146583103,
"node_id": "U_kgDOCLyuPw",
"avatar_url": "https://avatars.githubusercontent.com/u/146583103?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/taozhiyuai",
"html_url": "https://github.com/taozhiyuai",
"followers_url": "https://api.github.com/users/taozhiyuai/followers",
"following_url": "https://api.github.com/users/taozhiyuai/following{/other_user}",
"gists_url": "https://api.github.com/users/taozhiyuai/gists{/gist_id}",
"starred_url": "https://api.github.com/users/taozhiyuai/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/taozhiyuai/subscriptions",
"organizations_url": "https://api.github.com/users/taozhiyuai/orgs",
"repos_url": "https://api.github.com/users/taozhiyuai/repos",
"events_url": "https://api.github.com/users/taozhiyuai/events{/privacy}",
"received_events_url": "https://api.github.com/users/taozhiyuai/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | 2 | 2024-05-13T10:08:20 | 2024-05-14T00:46:33 | 2024-05-14T00:26:16 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
1. /set SYSTEM
2. /show modelfile, I can see the setting is updated.
3. If I /bye, run the model again, and /show modelfile, it seems the SYSTEM in the modelfile is restored to the original.
Is it possible for the model to keep my new SYSTEM setting permanently?
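A workaround sketch, in case it helps (model names are placeholders): either `/save` the session as a new model, or bake the prompt into a Modelfile.
```sh
# In the REPL:
#   >>> /set system You are a concise assistant.
#   >>> /save my-model
# Or via a Modelfile:
printf 'FROM llama3\nSYSTEM "You are a concise assistant."\n' > Modelfile
ollama create my-model -f Modelfile
```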
### OS
macOS
### GPU
Apple
### CPU
Apple
### Ollama version
1.37 | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/4397/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/4397/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/8454 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/8454/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/8454/comments | https://api.github.com/repos/ollama/ollama/issues/8454/events | https://github.com/ollama/ollama/pull/8454 | 2,792,255,964 | PR_kwDOJ0Z1Ps6H-0JU | 8,454 | Align file position to general.alignment at end of decoding. | {
"login": "rick-github",
"id": 14946854,
"node_id": "MDQ6VXNlcjE0OTQ2ODU0",
"avatar_url": "https://avatars.githubusercontent.com/u/14946854?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/rick-github",
"html_url": "https://github.com/rick-github",
"followers_url": "https://api.github.com/users/rick-github/followers",
"following_url": "https://api.github.com/users/rick-github/following{/other_user}",
"gists_url": "https://api.github.com/users/rick-github/gists{/gist_id}",
"starred_url": "https://api.github.com/users/rick-github/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/rick-github/subscriptions",
"organizations_url": "https://api.github.com/users/rick-github/orgs",
"repos_url": "https://api.github.com/users/rick-github/repos",
"events_url": "https://api.github.com/users/rick-github/events{/privacy}",
"received_events_url": "https://api.github.com/users/rick-github/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | open | false | null | [] | null | 0 | 2025-01-16T10:03:24 | 2025-01-16T17:20:02 | null | COLLABORATOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/8454",
"html_url": "https://github.com/ollama/ollama/pull/8454",
"diff_url": "https://github.com/ollama/ollama/pull/8454.diff",
"patch_url": "https://github.com/ollama/ollama/pull/8454.patch",
"merged_at": null
} | Align the file position at the end of DecodeGGML with `general.alignment`.
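For reference, the round-up idiom this relies on (a sketch of the arithmetic, not the actual diff):
```sh
pos=1234 align=32
echo $(( (pos + align - 1) / align * align ))   # -> 1248, the next multiple of 32
```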
Fixes: #8456
Fixes: #5939 | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/8454/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/8454/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/4144 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/4144/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/4144/comments | https://api.github.com/repos/ollama/ollama/issues/4144/events | https://github.com/ollama/ollama/pull/4144 | 2,278,576,977 | PR_kwDOJ0Z1Ps5uhWm1 | 4,144 | Make maximum pending request configurable | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2024-05-03T23:37:51 | 2024-05-05T17:53:47 | 2024-05-05T17:53:44 | COLLABORATOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/4144",
"html_url": "https://github.com/ollama/ollama/pull/4144",
"diff_url": "https://github.com/ollama/ollama/pull/4144.diff",
"patch_url": "https://github.com/ollama/ollama/pull/4144.patch",
"merged_at": "2024-05-05T17:53:44"
} | Bump the maximum queued requests to 512 (from 10)
Make it configurable with a new env var `OLLAMA_MAX_QUEUE`
Return a 503 when the server is too busy instead of more generic 500.
Fixes #4124
With the added integration test, here are some quick memory stats on Linux:
- Just starting ollama: RSS 429.0m
- Load orca-mini: RSS 456.8m (just the Go process, not the child runner)
- During my stress test where I push >512 connections: RSS 489.0m
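Example usage (the value is illustrative):
```sh
# Queue up to 1024 pending requests before the server starts returning 503s.
OLLAMA_MAX_QUEUE=1024 ollama serve
```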
| {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/4144/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/4144/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/1660 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/1660/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/1660/comments | https://api.github.com/repos/ollama/ollama/issues/1660/events | https://github.com/ollama/ollama/issues/1660 | 2,052,754,121 | I_kwDOJ0Z1Ps56WorJ | 1,660 | Docker image for quantize/convert no longer working | {
"login": "technovangelist",
"id": 633681,
"node_id": "MDQ6VXNlcjYzMzY4MQ==",
"avatar_url": "https://avatars.githubusercontent.com/u/633681?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/technovangelist",
"html_url": "https://github.com/technovangelist",
"followers_url": "https://api.github.com/users/technovangelist/followers",
"following_url": "https://api.github.com/users/technovangelist/following{/other_user}",
"gists_url": "https://api.github.com/users/technovangelist/gists{/gist_id}",
"starred_url": "https://api.github.com/users/technovangelist/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/technovangelist/subscriptions",
"organizations_url": "https://api.github.com/users/technovangelist/orgs",
"repos_url": "https://api.github.com/users/technovangelist/repos",
"events_url": "https://api.github.com/users/technovangelist/events{/privacy}",
"received_events_url": "https://api.github.com/users/technovangelist/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2023-12-21T16:44:33 | 2023-12-21T17:21:41 | 2023-12-21T17:21:41 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | I have an older version of the image on my Mac and converting a model works fine. But I pulled to a new machine and getting an error about protobufs.
```
You are using the default legacy behaviour of the <class 'transformers.models.llama.tokenization_llama.LlamaTokenizer'>. This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565
Traceback (most recent call last):
File "/workdir/llama.cpp/convert.py", line 1279, in <module>
main()
File "/workdir/llama.cpp/convert.py", line 1255, in main
vocab = VocabLoader(params, vocab_dir)
File "/workdir/llama.cpp/convert.py", line 342, in __init__
self.tokenizer = AutoTokenizer.from_pretrained(str(fname_tokenizer), trust_remote_code=True)
File "/usr/local/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py", line 787, in from_pretrained
Loading model file /model/pytorch_model-00001-of-00006.bin
Loading model file /model/pytorch_model-00001-of-00006.bin
Loading model file /model/pytorch_model-00002-of-00006.bin
Loading model file /model/pytorch_model-00003-of-00006.bin
Loading model file /model/pytorch_model-00004-of-00006.bin
Loading model file /model/pytorch_model-00005-of-00006.bin
Loading model file /model/pytorch_model-00006-of-00006.bin
params = Params(n_vocab=32001, n_embd=5120, n_layer=40, n_ctx=2048, n_ff=13824, n_head=40, n_head_kv=40, n_experts=None, n_experts_used=None, f_norm_eps=1e-06, rope_scaling_type=None, f_rope_freq_base=None, f_rope_scale=None, n_orig_ctx=None, rope_finetuned=None, ftype=<GGMLFileType.MostlyF16: 1>, path_model=PosixPath('/model'))
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
File "/usr/local/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2028, in from_pretrained
return cls._from_pretrained(
File "/usr/local/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2260, in _from_pretrained
tokenizer = cls(*init_inputs, **init_kwargs)
File "/usr/local/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama_fast.py", line 124, in __init__
super().__init__(
File "/usr/local/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py", line 114, in __init__
fast_tokenizer = convert_slow_tokenizer(slow_tokenizer)
File "/usr/local/lib/python3.10/site-packages/transformers/convert_slow_tokenizer.py", line 1336, in convert_slow_tokenizer
return converter_class(transformer_tokenizer).converted()
File "/usr/local/lib/python3.10/site-packages/transformers/convert_slow_tokenizer.py", line 459, in __init__
requires_backends(self, "protobuf")
File "/usr/local/lib/python3.10/site-packages/transformers/utils/import_utils.py", line 1276, in requires_backends
raise ImportError("".join(failed))
ImportError:
LlamaConverter requires the protobuf library but it was not found in your environment. Checkout the instructions on the
installation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones
that match your environment. Please note that you may need to restart your runtime after installation.
```
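A likely quick fix inside the image, going by the traceback's own suggestion (untested):
```sh
pip install protobuf
```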
For context, the model is chavinlo/gpt4-x-alpaca; an older image does the conversion just fine. | {
"login": "technovangelist",
"id": 633681,
"node_id": "MDQ6VXNlcjYzMzY4MQ==",
"avatar_url": "https://avatars.githubusercontent.com/u/633681?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/technovangelist",
"html_url": "https://github.com/technovangelist",
"followers_url": "https://api.github.com/users/technovangelist/followers",
"following_url": "https://api.github.com/users/technovangelist/following{/other_user}",
"gists_url": "https://api.github.com/users/technovangelist/gists{/gist_id}",
"starred_url": "https://api.github.com/users/technovangelist/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/technovangelist/subscriptions",
"organizations_url": "https://api.github.com/users/technovangelist/orgs",
"repos_url": "https://api.github.com/users/technovangelist/repos",
"events_url": "https://api.github.com/users/technovangelist/events{/privacy}",
"received_events_url": "https://api.github.com/users/technovangelist/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/1660/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/1660/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/2340 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/2340/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/2340/comments | https://api.github.com/repos/ollama/ollama/issues/2340/events | https://github.com/ollama/ollama/pull/2340 | 2,116,716,437 | PR_kwDOJ0Z1Ps5l7VJQ | 2,340 | Add llm-ollama plugin for Datasette's LLM CLI to README | {
"login": "easp",
"id": 414705,
"node_id": "MDQ6VXNlcjQxNDcwNQ==",
"avatar_url": "https://avatars.githubusercontent.com/u/414705?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/easp",
"html_url": "https://github.com/easp",
"followers_url": "https://api.github.com/users/easp/followers",
"following_url": "https://api.github.com/users/easp/following{/other_user}",
"gists_url": "https://api.github.com/users/easp/gists{/gist_id}",
"starred_url": "https://api.github.com/users/easp/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/easp/subscriptions",
"organizations_url": "https://api.github.com/users/easp/orgs",
"repos_url": "https://api.github.com/users/easp/repos",
"events_url": "https://api.github.com/users/easp/events{/privacy}",
"received_events_url": "https://api.github.com/users/easp/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2024-02-03T23:37:18 | 2024-02-03T23:40:51 | 2024-02-03T23:40:50 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/2340",
"html_url": "https://github.com/ollama/ollama/pull/2340",
"diff_url": "https://github.com/ollama/ollama/pull/2340.diff",
"patch_url": "https://github.com/ollama/ollama/pull/2340.patch",
"merged_at": "2024-02-03T23:40:50"
} | The Datasette project's LLM CLI provides a common interface to a variety of LLM APIs and local LLMs. This PR adds a link to an Ollama plugin for that tool. | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/2340/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/2340/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/3481 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/3481/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/3481/comments | https://api.github.com/repos/ollama/ollama/issues/3481/events | https://github.com/ollama/ollama/pull/3481 | 2,224,297,232 | PR_kwDOJ0Z1Ps5rpGQd | 3,481 | CI subprocess path fix | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2024-04-04T02:13:10 | 2024-04-04T02:29:13 | 2024-04-04T02:29:10 | COLLABORATOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/3481",
"html_url": "https://github.com/ollama/ollama/pull/3481",
"diff_url": "https://github.com/ollama/ollama/pull/3481.diff",
"patch_url": "https://github.com/ollama/ollama/pull/3481.patch",
"merged_at": "2024-04-04T02:29:10"
} | null | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/3481/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/3481/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/6770 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/6770/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/6770/comments | https://api.github.com/repos/ollama/ollama/issues/6770/events | https://github.com/ollama/ollama/issues/6770 | 2,521,230,892 | I_kwDOJ0Z1Ps6WRu4s | 6,770 | Library missing from ollama when running it in Docker | {
"login": "factor3",
"id": 138332567,
"node_id": "U_kgDOCD7Jlw",
"avatar_url": "https://avatars.githubusercontent.com/u/138332567?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/factor3",
"html_url": "https://github.com/factor3",
"followers_url": "https://api.github.com/users/factor3/followers",
"following_url": "https://api.github.com/users/factor3/following{/other_user}",
"gists_url": "https://api.github.com/users/factor3/gists{/gist_id}",
"starred_url": "https://api.github.com/users/factor3/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/factor3/subscriptions",
"organizations_url": "https://api.github.com/users/factor3/orgs",
"repos_url": "https://api.github.com/users/factor3/repos",
"events_url": "https://api.github.com/users/factor3/events{/privacy}",
"received_events_url": "https://api.github.com/users/factor3/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396220,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2afA",
"url": "https://api.github.com/repos/ollama/ollama/labels/question",
"name": "question",
"color": "d876e3",
"default": true,
"description": "General questions"
},
{
"id": 5755339642,
"node_id": "LA_kwDOJ0Z1Ps8AAAABVwuDeg",
"url": "https://api.github.com/repos/ollama/ollama/labels/linux",
"name": "linux",
"color": "516E70",
"default": false,
"description": ""
},
{
"id": 6430601766,
"node_id": "LA_kwDOJ0Z1Ps8AAAABf0syJg",
"url": "https://api.github.com/repos/ollama/ollama/labels/nvidia",
"name": "nvidia",
"color": "8CDB00",
"default": false,
"description": "Issues relating to Nvidia GPUs and CUDA"
},
{
"id": 6677677816,
"node_id": "LA_kwDOJ0Z1Ps8AAAABjgVG-A",
"url": "https://api.github.com/repos/ollama/ollama/labels/docker",
"name": "docker",
"color": "0052CC",
"default": false,
"description": "Issues relating to using ollama in containers"
}
] | closed | false | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 3 | 2024-09-12T03:06:53 | 2024-09-12T23:19:48 | 2024-09-12T23:19:48 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
I am attempting to run the Docker version of Ollama with an NVIDIA GPU. I installed all the required software as described at
https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html
I attempted to run the ollama container:
docker run -d --gpus=all -v ./ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
and I am getting an error:
docker: Error response from daemon: failed to create task for container: failed to create shim task: OCI runtime create failed: runc create failed: unable to start container process: error during container init: error running hook #0: error running hook: exit status 1, stdout: , stderr: Auto-detected mode as 'legacy'
nvidia-container-cli: initialization error: load library failed: libnvidia-ml.so.1: cannot open shared object file: no such file or directory: unknown.
There is clearly something wrong here. Somehow, installing the NVIDIA Container Toolkit does not put the required library in place. Can this be fixed?
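For what it's worth, the usual recovery sketch from NVIDIA's install guide is to (re)configure the runtime and restart Docker (assuming an apt-based host; adjust the package step for your distro, and the CUDA image tag below is only an example):
```
# Reinstall/configure the toolkit so the runtime hook can find libnvidia-ml.so.1
sudo apt-get install -y nvidia-container-toolkit
sudo nvidia-ctk runtime configure --runtime=docker
sudo systemctl restart docker
# Sanity check that GPU passthrough works at all
docker run --rm --gpus all nvidia/cuda:12.4.1-base-ubuntu22.04 nvidia-smi
```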
### OS
Linux
### GPU
Nvidia
### CPU
AMD
### Ollama version
_No response_ | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/6770/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/6770/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/5050 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5050/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5050/comments | https://api.github.com/repos/ollama/ollama/issues/5050/events | https://github.com/ollama/ollama/issues/5050 | 2,354,119,365 | I_kwDOJ0Z1Ps6MUQLF | 5,050 | Windows Based Ollama Updates Imposing Unjust Authority over Independent Applications | {
"login": "Soul2294",
"id": 15517546,
"node_id": "MDQ6VXNlcjE1NTE3NTQ2",
"avatar_url": "https://avatars.githubusercontent.com/u/15517546?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Soul2294",
"html_url": "https://github.com/Soul2294",
"followers_url": "https://api.github.com/users/Soul2294/followers",
"following_url": "https://api.github.com/users/Soul2294/following{/other_user}",
"gists_url": "https://api.github.com/users/Soul2294/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Soul2294/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Soul2294/subscriptions",
"organizations_url": "https://api.github.com/users/Soul2294/orgs",
"repos_url": "https://api.github.com/users/Soul2294/repos",
"events_url": "https://api.github.com/users/Soul2294/events{/privacy}",
"received_events_url": "https://api.github.com/users/Soul2294/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
},
{
"id": 5860134234,
"node_id": "LA_kwDOJ0Z1Ps8AAAABXUqNWg",
"url": "https://api.github.com/repos/ollama/ollama/labels/windows",
"name": "windows",
"color": "0052CC",
"default": false,
"description": ""
}
] | closed | false | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 3 | 2024-06-14T21:08:59 | 2024-06-19T16:13:41 | 2024-06-19T16:13:41 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
Every time Ollama needs to restart for an update, it either directly requests that I close OBS or outright shuts it down mid-stream. I couldn't for the life of me determine why; perhaps you're both using the same and/or conflicting libraries?
Regardless of why, it seems completely senseless on the surface...
At the bare minimum it deserves an explanation, so that worst case I can try to find my own solution...
### OS
Windows
### GPU
Nvidia
### CPU
Intel
### Ollama version
All of them, the entire time. Since the first release. | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5050/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/5050/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/3178 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/3178/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/3178/comments | https://api.github.com/repos/ollama/ollama/issues/3178/events | https://github.com/ollama/ollama/pull/3178 | 2,189,825,683 | PR_kwDOJ0Z1Ps5p0UUC | 3,178 | Add Saddle | {
"login": "jikkuatwork",
"id": 113770409,
"node_id": "U_kgDOBsf_qQ",
"avatar_url": "https://avatars.githubusercontent.com/u/113770409?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jikkuatwork",
"html_url": "https://github.com/jikkuatwork",
"followers_url": "https://api.github.com/users/jikkuatwork/followers",
"following_url": "https://api.github.com/users/jikkuatwork/following{/other_user}",
"gists_url": "https://api.github.com/users/jikkuatwork/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jikkuatwork/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jikkuatwork/subscriptions",
"organizations_url": "https://api.github.com/users/jikkuatwork/orgs",
"repos_url": "https://api.github.com/users/jikkuatwork/repos",
"events_url": "https://api.github.com/users/jikkuatwork/events{/privacy}",
"received_events_url": "https://api.github.com/users/jikkuatwork/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2024-03-16T06:55:03 | 2024-03-25T18:54:09 | 2024-03-25T18:54:09 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/3178",
"html_url": "https://github.com/ollama/ollama/pull/3178",
"diff_url": "https://github.com/ollama/ollama/pull/3178.diff",
"patch_url": "https://github.com/ollama/ollama/pull/3178.patch",
"merged_at": "2024-03-25T18:54:09"
} | Another simple, no-build, no-setup web interface. | {
"login": "BruceMacD",
"id": 5853428,
"node_id": "MDQ6VXNlcjU4NTM0Mjg=",
"avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/BruceMacD",
"html_url": "https://github.com/BruceMacD",
"followers_url": "https://api.github.com/users/BruceMacD/followers",
"following_url": "https://api.github.com/users/BruceMacD/following{/other_user}",
"gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}",
"starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions",
"organizations_url": "https://api.github.com/users/BruceMacD/orgs",
"repos_url": "https://api.github.com/users/BruceMacD/repos",
"events_url": "https://api.github.com/users/BruceMacD/events{/privacy}",
"received_events_url": "https://api.github.com/users/BruceMacD/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/3178/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/3178/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/7284 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/7284/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/7284/comments | https://api.github.com/repos/ollama/ollama/issues/7284/events | https://github.com/ollama/ollama/issues/7284 | 2,601,193,879 | I_kwDOJ0Z1Ps6bCxGX | 7,284 | Is default install location configurable | {
"login": "wgong",
"id": 329928,
"node_id": "MDQ6VXNlcjMyOTkyOA==",
"avatar_url": "https://avatars.githubusercontent.com/u/329928?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/wgong",
"html_url": "https://github.com/wgong",
"followers_url": "https://api.github.com/users/wgong/followers",
"following_url": "https://api.github.com/users/wgong/following{/other_user}",
"gists_url": "https://api.github.com/users/wgong/gists{/gist_id}",
"starred_url": "https://api.github.com/users/wgong/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/wgong/subscriptions",
"organizations_url": "https://api.github.com/users/wgong/orgs",
"repos_url": "https://api.github.com/users/wgong/repos",
"events_url": "https://api.github.com/users/wgong/events{/privacy}",
"received_events_url": "https://api.github.com/users/wgong/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396220,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2afA",
"url": "https://api.github.com/repos/ollama/ollama/labels/question",
"name": "question",
"color": "d876e3",
"default": true,
"description": "General questions"
}
] | closed | false | null | [] | null | 2 | 2024-10-21T03:29:07 | 2024-10-22T18:46:05 | 2024-10-22T18:46:05 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | I run Ollama on Ubuntu, where its default home is `/usr/share/ollama`.
Recently, I ran out of space on that partition after experimenting with quite a few models.
I then modified the `install.sh` script to install Ollama to `/opt/ollama`.
This worked when starting Ollama on the terminal by running `ollama serve`; however, when I tried running it as a systemctl service,
it failed. Checking the log via `sudo journalctl -u ollama -n 50 --no-pager`
showed an error message like "no permission to write id file in /usr/share folder".
I'm curious if `/usr/share` is hard-coded somewhere.
After creating a symlink for `/usr/share/ollama` to `/opt/ollama` and revising `/etc/systemd/system/ollama.service` to run as root, I was finally able to start the systemctl service.
Although it worked for me, I suspect it's a hack. Could you please advise on the correct way to install Ollama to a custom location and run it as a service (not as root)?
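For context, the pattern I would guess is the supported one is to keep the stock install and point only the model store elsewhere via a systemd drop-in (a sketch; the /opt paths are my examples, not defaults):
```
# Create a drop-in override for the packaged service
sudo systemctl edit ollama
# In the editor that opens, add:
#   [Service]
#   Environment="OLLAMA_MODELS=/opt/ollama/models"
# Make sure the service user can write to the new location
sudo chown -R ollama:ollama /opt/ollama/models
sudo systemctl daemon-reload
sudo systemctl restart ollama
```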
Claude Pro helped me with the above troubleshooting and workaround.
It mentioned that the following environment variables are customizable. Is this correct?
```
OLLAMA_HOST: Specifies the host and port Ollama listens on (default: 127.0.0.1:11434).
OLLAMA_MODELS: We've discussed this; it sets the directory for model storage.
OLLAMA_HOME: Sets the home directory for Ollama (default: ~/.ollama).
OLLAMA_KEEP_ALIVE: Sets the duration to keep models loaded in memory (default: 5m).
OLLAMA_TIMEOUT: Sets the timeout for loading models (default: 5m).
OLLAMA_ORIGINS: Comma-separated list of allowed origins for CORS.
OLLAMA_NOPRUNE: If set, prevents automatic removal of unused layers.
OLLAMA_DEBUGLOG: Enables debug logging when set to 'on'.
OLLAMA_VERBOSE: Enables verbose logging when set to 'on'.
OLLAMA_GPU_LAYERS: Number of layers to offload to GPU (depends on available VRAM).
OLLAMA_THREADS: Sets the number of threads to use (default is number of physical CPU cores).
OLLAMA_SKIP_UPDATE_CHECK: Skips the update check on startup if set.
OLLAMA_INSECURE: Allows insecure connections when pulling models if set.
OLLAMA_LOCKFILE: Specifies a custom path for the lockfile.
OLLAMA_LOGFILE: Specifies a custom path for the logfile.
```
Thanks
| {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/7284/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/7284/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/6697 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/6697/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/6697/comments | https://api.github.com/repos/ollama/ollama/issues/6697/events | https://github.com/ollama/ollama/issues/6697 | 2,512,230,580 | I_kwDOJ0Z1Ps6VvZi0 | 6,697 | IGPUMemLimit/rocmMinimumMemory are undefined | {
"login": "wangzd0209",
"id": 99313728,
"node_id": "U_kgDOBetoQA",
"avatar_url": "https://avatars.githubusercontent.com/u/99313728?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/wangzd0209",
"html_url": "https://github.com/wangzd0209",
"followers_url": "https://api.github.com/users/wangzd0209/followers",
"following_url": "https://api.github.com/users/wangzd0209/following{/other_user}",
"gists_url": "https://api.github.com/users/wangzd0209/gists{/gist_id}",
"starred_url": "https://api.github.com/users/wangzd0209/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/wangzd0209/subscriptions",
"organizations_url": "https://api.github.com/users/wangzd0209/orgs",
"repos_url": "https://api.github.com/users/wangzd0209/repos",
"events_url": "https://api.github.com/users/wangzd0209/events{/privacy}",
"received_events_url": "https://api.github.com/users/wangzd0209/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396220,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2afA",
"url": "https://api.github.com/repos/ollama/ollama/labels/question",
"name": "question",
"color": "d876e3",
"default": true,
"description": "General questions"
}
] | closed | false | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 1 | 2024-09-08T06:52:06 | 2024-09-09T16:15:44 | 2024-09-09T16:15:17 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
When I first tried to build the code with GoLand, it would not compile; it said IGPUMemLimit/rocmMinimumMemory are undefined.
IGPUMemLimit and rocmMinimumMemory are just constants. Can anyone help me?
![Screenshot 2024-09-08 145031](https://github.com/user-attachments/assets/016bc32f-9f31-4837-813a-9004c6ea99e5)
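For reference, these constants live in platform-specific source files behind build tags, so building from the CLI with the repo's documented flow usually succeeds even when the IDE's build-tag settings don't pick them up (a sketch, assuming the source layout of this release):
```
# From the repository root: generate the llama.cpp bindings, then build
go generate ./...
go build .
```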
### OS
Windows
### GPU
_No response_
### CPU
_No response_
### Ollama version
_No response_ | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/6697/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/6697/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/4379 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/4379/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/4379/comments | https://api.github.com/repos/ollama/ollama/issues/4379/events | https://github.com/ollama/ollama/pull/4379 | 2,291,383,212 | PR_kwDOJ0Z1Ps5vLtrr | 4,379 | Update `LlamaScript` to point to new link from Legacy link. | {
"login": "zanderlewis",
"id": 158775116,
"node_id": "U_kgDOCXa3TA",
"avatar_url": "https://avatars.githubusercontent.com/u/158775116?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/zanderlewis",
"html_url": "https://github.com/zanderlewis",
"followers_url": "https://api.github.com/users/zanderlewis/followers",
"following_url": "https://api.github.com/users/zanderlewis/following{/other_user}",
"gists_url": "https://api.github.com/users/zanderlewis/gists{/gist_id}",
"starred_url": "https://api.github.com/users/zanderlewis/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/zanderlewis/subscriptions",
"organizations_url": "https://api.github.com/users/zanderlewis/orgs",
"repos_url": "https://api.github.com/users/zanderlewis/repos",
"events_url": "https://api.github.com/users/zanderlewis/events{/privacy}",
"received_events_url": "https://api.github.com/users/zanderlewis/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2024-05-12T15:25:07 | 2024-05-14T01:08:32 | 2024-05-14T01:08:32 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/4379",
"html_url": "https://github.com/ollama/ollama/pull/4379",
"diff_url": "https://github.com/ollama/ollama/pull/4379.diff",
"patch_url": "https://github.com/ollama/ollama/pull/4379.patch",
"merged_at": "2024-05-14T01:08:32"
} | Still used the legacy link. | {
"login": "joshyan1",
"id": 76125168,
"node_id": "MDQ6VXNlcjc2MTI1MTY4",
"avatar_url": "https://avatars.githubusercontent.com/u/76125168?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/joshyan1",
"html_url": "https://github.com/joshyan1",
"followers_url": "https://api.github.com/users/joshyan1/followers",
"following_url": "https://api.github.com/users/joshyan1/following{/other_user}",
"gists_url": "https://api.github.com/users/joshyan1/gists{/gist_id}",
"starred_url": "https://api.github.com/users/joshyan1/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/joshyan1/subscriptions",
"organizations_url": "https://api.github.com/users/joshyan1/orgs",
"repos_url": "https://api.github.com/users/joshyan1/repos",
"events_url": "https://api.github.com/users/joshyan1/events{/privacy}",
"received_events_url": "https://api.github.com/users/joshyan1/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/4379/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/4379/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/5083 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5083/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5083/comments | https://api.github.com/repos/ollama/ollama/issues/5083/events | https://github.com/ollama/ollama/issues/5083 | 2,355,837,403 | I_kwDOJ0Z1Ps6Maznb | 5,083 | Cannot run in musl and busybox core systems | {
"login": "asimovc",
"id": 142914286,
"node_id": "U_kgDOCISy7g",
"avatar_url": "https://avatars.githubusercontent.com/u/142914286?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/asimovc",
"html_url": "https://github.com/asimovc",
"followers_url": "https://api.github.com/users/asimovc/followers",
"following_url": "https://api.github.com/users/asimovc/following{/other_user}",
"gists_url": "https://api.github.com/users/asimovc/gists{/gist_id}",
"starred_url": "https://api.github.com/users/asimovc/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/asimovc/subscriptions",
"organizations_url": "https://api.github.com/users/asimovc/orgs",
"repos_url": "https://api.github.com/users/asimovc/repos",
"events_url": "https://api.github.com/users/asimovc/events{/privacy}",
"received_events_url": "https://api.github.com/users/asimovc/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396200,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request",
"name": "feature request",
"color": "a2eeef",
"default": false,
"description": "New feature or request"
},
{
"id": 5755339642,
"node_id": "LA_kwDOJ0Z1Ps8AAAABVwuDeg",
"url": "https://api.github.com/repos/ollama/ollama/labels/linux",
"name": "linux",
"color": "516E70",
"default": false,
"description": ""
}
] | open | false | null | [] | null | 7 | 2024-06-16T15:24:14 | 2024-06-24T06:16:51 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
When I install Ollama on my musl-based [distro](https://kisslinux.github.io), it installs but cannot execute; I think this is because the binary is linked against glibc. Also, Ollama's install script needs the `lspci -d` option for GPU detection, and BusyBox's `lspci` doesn't have that option, so the install script is not core-agnostic.
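As a quick confirmation that it really is the glibc linkage (a sketch; musl systems may not ship ldd, hence the `file` fallback):
```
# Show which dynamic loader the prebuilt binary expects
file "$(command -v ollama)"
# Where ldd exists, list the libc it links against
ldd "$(command -v ollama)" 2>&1 | head
```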
### OS
Linux
### GPU
AMD
### CPU
AMD
### Ollama version
0.1.44 | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5083/reactions",
"total_count": 3,
"+1": 3,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/5083/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/7157 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/7157/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/7157/comments | https://api.github.com/repos/ollama/ollama/issues/7157/events | https://github.com/ollama/ollama/pull/7157 | 2,577,084,140 | PR_kwDOJ0Z1Ps5-Iy5- | 7,157 | Remove submodule and shift to Go server - 0.4.0 | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2024-10-09T22:33:44 | 2024-10-30T17:34:32 | 2024-10-30T17:34:28 | COLLABORATOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/7157",
"html_url": "https://github.com/ollama/ollama/pull/7157",
"diff_url": "https://github.com/ollama/ollama/pull/7157.diff",
"patch_url": "https://github.com/ollama/ollama/pull/7157.patch",
"merged_at": "2024-10-30T17:34:28"
} | The Go server is now available in RC form at https://github.com/ollama/ollama/releases with 0.4.0
These changes are also in [dhiltgen/remove_submodule](https://github.com/ollama/ollama/tree/dhiltgen/remove_submodule), which is currently being used to build the RCs for the release. As we near finalizing the release, this PR can be merged to main, but we will no longer be able to build the C++ server (0.3.x releases) from main at that point.
| {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/7157/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/7157/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/2139 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/2139/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/2139/comments | https://api.github.com/repos/ollama/ollama/issues/2139/events | https://github.com/ollama/ollama/issues/2139 | 2,094,478,009 | I_kwDOJ0Z1Ps581zK5 | 2,139 | Ollama doesn't generate text in newer version of llama index | {
"login": "Bearsaerker",
"id": 92314812,
"node_id": "U_kgDOBYCcvA",
"avatar_url": "https://avatars.githubusercontent.com/u/92314812?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Bearsaerker",
"html_url": "https://github.com/Bearsaerker",
"followers_url": "https://api.github.com/users/Bearsaerker/followers",
"following_url": "https://api.github.com/users/Bearsaerker/following{/other_user}",
"gists_url": "https://api.github.com/users/Bearsaerker/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Bearsaerker/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Bearsaerker/subscriptions",
"organizations_url": "https://api.github.com/users/Bearsaerker/orgs",
"repos_url": "https://api.github.com/users/Bearsaerker/repos",
"events_url": "https://api.github.com/users/Bearsaerker/events{/privacy}",
"received_events_url": "https://api.github.com/users/Bearsaerker/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2024-01-22T18:12:17 | 2024-01-22T18:13:41 | 2024-01-22T18:13:41 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | I have a peculiar problem. As of llama index version 0.9.22 ollama is not able to produce text for me. I downgraded and tested almost all versions from 0.9.1 up to 0.9.21. 0.9.21 is the last version in which ollama is able to produce text with llama index.
I have it integrated as " llm = Ollama(model="Solar", temperature=0.2)" Adding request_timeout doesn't change a thing unfortunately | {
"login": "Bearsaerker",
"id": 92314812,
"node_id": "U_kgDOBYCcvA",
"avatar_url": "https://avatars.githubusercontent.com/u/92314812?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Bearsaerker",
"html_url": "https://github.com/Bearsaerker",
"followers_url": "https://api.github.com/users/Bearsaerker/followers",
"following_url": "https://api.github.com/users/Bearsaerker/following{/other_user}",
"gists_url": "https://api.github.com/users/Bearsaerker/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Bearsaerker/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Bearsaerker/subscriptions",
"organizations_url": "https://api.github.com/users/Bearsaerker/orgs",
"repos_url": "https://api.github.com/users/Bearsaerker/repos",
"events_url": "https://api.github.com/users/Bearsaerker/events{/privacy}",
"received_events_url": "https://api.github.com/users/Bearsaerker/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/2139/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/2139/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/5640 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5640/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5640/comments | https://api.github.com/repos/ollama/ollama/issues/5640/events | https://github.com/ollama/ollama/issues/5640 | 2,404,211,759 | I_kwDOJ0Z1Ps6PTVwv | 5,640 | Pass array of messages as an argument | {
"login": "M3cubo",
"id": 1382596,
"node_id": "MDQ6VXNlcjEzODI1OTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/1382596?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/M3cubo",
"html_url": "https://github.com/M3cubo",
"followers_url": "https://api.github.com/users/M3cubo/followers",
"following_url": "https://api.github.com/users/M3cubo/following{/other_user}",
"gists_url": "https://api.github.com/users/M3cubo/gists{/gist_id}",
"starred_url": "https://api.github.com/users/M3cubo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/M3cubo/subscriptions",
"organizations_url": "https://api.github.com/users/M3cubo/orgs",
"repos_url": "https://api.github.com/users/M3cubo/repos",
"events_url": "https://api.github.com/users/M3cubo/events{/privacy}",
"received_events_url": "https://api.github.com/users/M3cubo/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396200,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request",
"name": "feature request",
"color": "a2eeef",
"default": false,
"description": "New feature or request"
}
] | closed | false | null | [] | null | 2 | 2024-07-11T22:03:16 | 2024-07-15T11:01:24 | 2024-07-15T11:01:24 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | In the Docs, it shows that using the API you can pass an array of messages:
"messages": [
{ "role": "user", "content": "why is the sky blue?" }
]
My question is: how can I do this with the CLI? Is it possible?
I'm looking into something like:
> ollama run "model" "prompt" "messages"
where the argument "messages" could be used to give context.
Do you have any ideas on how to do it?
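For comparison, the same context can be passed with the HTTP API via curl, since the messages array is part of the /api/chat endpoint (the model name here is just an example). As far as I can tell, `ollama run` itself only accepts a single prompt string:
```
curl http://localhost:11434/api/chat -d '{
  "model": "llama3",
  "messages": [
    { "role": "user", "content": "why is the sky blue?" },
    { "role": "assistant", "content": "Because of Rayleigh scattering." },
    { "role": "user", "content": "and why is it red at sunset?" }
  ]
}'
```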
| {
"login": "M3cubo",
"id": 1382596,
"node_id": "MDQ6VXNlcjEzODI1OTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/1382596?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/M3cubo",
"html_url": "https://github.com/M3cubo",
"followers_url": "https://api.github.com/users/M3cubo/followers",
"following_url": "https://api.github.com/users/M3cubo/following{/other_user}",
"gists_url": "https://api.github.com/users/M3cubo/gists{/gist_id}",
"starred_url": "https://api.github.com/users/M3cubo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/M3cubo/subscriptions",
"organizations_url": "https://api.github.com/users/M3cubo/orgs",
"repos_url": "https://api.github.com/users/M3cubo/repos",
"events_url": "https://api.github.com/users/M3cubo/events{/privacy}",
"received_events_url": "https://api.github.com/users/M3cubo/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5640/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/5640/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/2400 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/2400/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/2400/comments | https://api.github.com/repos/ollama/ollama/issues/2400/events | https://github.com/ollama/ollama/issues/2400 | 2,124,080,088 | I_kwDOJ0Z1Ps5-muPY | 2,400 | Sending empty prompt to `llm.Predict` hangs | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | 1 | 2024-02-07T23:25:46 | 2024-02-21T00:03:54 | 2024-02-21T00:03:53 | MEMBER | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | This is a less severe/internal version of https://github.com/ollama/ollama/issues/2397, where sending an empty prompt `""` to the runner causes a hang. | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/2400/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/2400/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/4673 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/4673/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/4673/comments | https://api.github.com/repos/ollama/ollama/issues/4673/events | https://github.com/ollama/ollama/issues/4673 | 2,320,186,365 | I_kwDOJ0Z1Ps6KSzv9 | 4,673 | BUG: PHI-3 | {
"login": "MichaelFomenko",
"id": 12229584,
"node_id": "MDQ6VXNlcjEyMjI5NTg0",
"avatar_url": "https://avatars.githubusercontent.com/u/12229584?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/MichaelFomenko",
"html_url": "https://github.com/MichaelFomenko",
"followers_url": "https://api.github.com/users/MichaelFomenko/followers",
"following_url": "https://api.github.com/users/MichaelFomenko/following{/other_user}",
"gists_url": "https://api.github.com/users/MichaelFomenko/gists{/gist_id}",
"starred_url": "https://api.github.com/users/MichaelFomenko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/MichaelFomenko/subscriptions",
"organizations_url": "https://api.github.com/users/MichaelFomenko/orgs",
"repos_url": "https://api.github.com/users/MichaelFomenko/repos",
"events_url": "https://api.github.com/users/MichaelFomenko/events{/privacy}",
"received_events_url": "https://api.github.com/users/MichaelFomenko/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | open | false | null | [] | null | 3 | 2024-05-28T05:55:38 | 2024-06-25T08:09:29 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
When I start a conversation in German, Phi-3 Mini and Medium work fine. But after several exchanges, the models slowly start producing gibberish and nonsense, repeating phrases, words, and tokens, and stop answering my questions. When I start a new conversation, it works fine again.
Open Web UI Version: 1.1.125
### OS
Linux
### GPU
AMD
### CPU
AMD
### Ollama version
0.1.39 | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/4673/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/4673/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/365 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/365/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/365/comments | https://api.github.com/repos/ollama/ollama/issues/365/events | https://github.com/ollama/ollama/issues/365 | 1,854,045,776 | I_kwDOJ0Z1Ps5ugn5Q | 365 | nous-hermes wrong model name? | {
"login": "carbocation",
"id": 218804,
"node_id": "MDQ6VXNlcjIxODgwNA==",
"avatar_url": "https://avatars.githubusercontent.com/u/218804?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/carbocation",
"html_url": "https://github.com/carbocation",
"followers_url": "https://api.github.com/users/carbocation/followers",
"following_url": "https://api.github.com/users/carbocation/following{/other_user}",
"gists_url": "https://api.github.com/users/carbocation/gists{/gist_id}",
"starred_url": "https://api.github.com/users/carbocation/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/carbocation/subscriptions",
"organizations_url": "https://api.github.com/users/carbocation/orgs",
"repos_url": "https://api.github.com/users/carbocation/repos",
"events_url": "https://api.github.com/users/carbocation/events{/privacy}",
"received_events_url": "https://api.github.com/users/carbocation/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | 1 | 2023-08-17T00:01:57 | 2023-08-17T03:42:33 | 2023-08-17T03:42:33 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | The README currently says:
https://github.com/jmorganca/ollama/blob/5ee611642049e9e4b8facb865325b33cb7343f06/README.md?plain=1#L42
But that pulls a 3GB model. Shouldn't this instead be suffixed with `:13b` like so?
| Model | Parameters | Size | Download |
| ------------------------ | ---------- | ----- | ------------------------------- |
| Nous-Hermes | 13B | 7.3GB | `ollama pull nous-hermes:13b` | | {
"login": "mchiang0610",
"id": 3325447,
"node_id": "MDQ6VXNlcjMzMjU0NDc=",
"avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mchiang0610",
"html_url": "https://github.com/mchiang0610",
"followers_url": "https://api.github.com/users/mchiang0610/followers",
"following_url": "https://api.github.com/users/mchiang0610/following{/other_user}",
"gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions",
"organizations_url": "https://api.github.com/users/mchiang0610/orgs",
"repos_url": "https://api.github.com/users/mchiang0610/repos",
"events_url": "https://api.github.com/users/mchiang0610/events{/privacy}",
"received_events_url": "https://api.github.com/users/mchiang0610/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/365/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/365/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/3787 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/3787/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/3787/comments | https://api.github.com/repos/ollama/ollama/issues/3787/events | https://github.com/ollama/ollama/issues/3787 | 2,254,727,557 | I_kwDOJ0Z1Ps6GZGmF | 3,787 | OOM with mixtral 8x22b | {
"login": "bozo32",
"id": 102033973,
"node_id": "U_kgDOBhTqNQ",
"avatar_url": "https://avatars.githubusercontent.com/u/102033973?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/bozo32",
"html_url": "https://github.com/bozo32",
"followers_url": "https://api.github.com/users/bozo32/followers",
"following_url": "https://api.github.com/users/bozo32/following{/other_user}",
"gists_url": "https://api.github.com/users/bozo32/gists{/gist_id}",
"starred_url": "https://api.github.com/users/bozo32/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/bozo32/subscriptions",
"organizations_url": "https://api.github.com/users/bozo32/orgs",
"repos_url": "https://api.github.com/users/bozo32/repos",
"events_url": "https://api.github.com/users/bozo32/events{/privacy}",
"received_events_url": "https://api.github.com/users/bozo32/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
},
{
"id": 6430601766,
"node_id": "LA_kwDOJ0Z1Ps8AAAABf0syJg",
"url": "https://api.github.com/repos/ollama/ollama/labels/nvidia",
"name": "nvidia",
"color": "8CDB00",
"default": false,
"description": "Issues relating to Nvidia GPUs and CUDA"
},
{
"id": 6849881759,
"node_id": "LA_kwDOJ0Z1Ps8AAAABmEjmnw",
"url": "https://api.github.com/repos/ollama/ollama/labels/memory",
"name": "memory",
"color": "5017EA",
"default": false,
"description": ""
}
] | closed | false | null | [] | null | 7 | 2024-04-20T21:22:53 | 2024-05-18T07:25:03 | 2024-05-18T07:25:03 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
OOM with Mixtral on an A100 80GB: 47/57 layers are offloaded to the GPU and then it chokes.
Running off the binary.
Just redownloaded it and re-ran; still got the same issue.
No problems with models that fit entirely into VRAM.
(base) tamas002@gpun201:~/ai$ ./ollama run mixtral:8x22b-instruct-v0.1-q5_K_M
[GIN] 2024/04/20 - 23:17:40 | 200 | 13.807µs | 127.0.0.1 | HEAD "/"
[GIN] 2024/04/20 - 23:17:40 | 200 | 1.937606ms | 127.0.0.1 | POST "/api/show"
[GIN] 2024/04/20 - 23:17:40 | 200 | 2.0645ms | 127.0.0.1 | POST "/api/show"
⠙ time=2024-04-20T23:17:40.214+02:00 level=INFO source=gpu.go:121 msg="Detecting GPU type"
time=2024-04-20T23:17:40.214+02:00 level=INFO source=gpu.go:268 msg="Searching for GPU management library libcudart.so*"
time=2024-04-20T23:17:40.215+02:00 level=INFO source=gpu.go:314 msg="Discovered GPU libraries: [/tmp/ollama3504175971/runners/cuda_v11/libcudart.so.11.0]"
time=2024-04-20T23:17:40.219+02:00 level=INFO source=gpu.go:126 msg="Nvidia GPU detected via cudart"
time=2024-04-20T23:17:40.219+02:00 level=INFO source=cpu_common.go:11 msg="CPU has AVX2"
⠹ time=2024-04-20T23:17:40.334+02:00 level=INFO source=gpu.go:202 msg="[cudart] CUDART CUDA Compute Capability detected: 8.0"
time=2024-04-20T23:17:40.397+02:00 level=INFO source=gpu.go:121 msg="Detecting GPU type"
time=2024-04-20T23:17:40.397+02:00 level=INFO source=gpu.go:268 msg="Searching for GPU management library libcudart.so*"
time=2024-04-20T23:17:40.398+02:00 level=INFO source=gpu.go:314 msg="Discovered GPU libraries: [/tmp/ollama3504175971/runners/cuda_v11/libcudart.so.11.0]"
time=2024-04-20T23:17:40.400+02:00 level=INFO source=gpu.go:126 msg="Nvidia GPU detected via cudart"
time=2024-04-20T23:17:40.400+02:00 level=INFO source=cpu_common.go:11 msg="CPU has AVX2"
⠸ time=2024-04-20T23:17:40.510+02:00 level=INFO source=gpu.go:202 msg="[cudart] CUDART CUDA Compute Capability detected: 8.0"
⠼ time=2024-04-20T23:17:40.573+02:00 level=INFO source=server.go:127 msg="offload to gpu" reallayers=47 layers=47 required="96360.4 MiB" used="80509.6 MiB" available="80627.6 MiB" kv="448.0 MiB" fulloffload="244.0 MiB" partialoffload="256.3 MiB"
time=2024-04-20T23:17:40.573+02:00 level=INFO source=cpu_common.go:11 msg="CPU has AVX2"
time=2024-04-20T23:17:40.573+02:00 level=INFO source=server.go:264 msg="starting llama server" cmd="/tmp/ollama3504175971/runners/cuda_v11/ollama_llama_server --model /home/WUR/tamas002/.ollama/models/blobs/sha256-630983e98a0c92b38850c213cb1d4a8a724635ccabf84fdf70f3fad6a862ce52 --ctx-size 2048 --batch-size 512 --embedding --log-disable --n-gpu-layers 47 --port 34211"
time=2024-04-20T23:17:40.573+02:00 level=INFO source=server.go:389 msg="waiting for llama runner to start responding"
⠇ {"function":"server_params_parse","level":"INFO","line":2603,"msg":"logging to file is disabled.","tid":"140660910362624","timestamp":1713647860}
{"build":1,"commit":"7593639","function":"main","level":"INFO","line":2819,"msg":"build info","tid":"140660910362624","timestamp":1713647860}
{"function":"main","level":"INFO","line":2822,"msg":"system info","n_threads":16,"n_threads_batch":-1,"system_info":"AVX =1 | AVX_VNNI = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 0 | NEON = 0 | ARM_FMA = 0 | F16C = 0| FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | ","tid":"140660910362624","timestamp":1713647860,"total_threads":32}
llama_model_loader: loaded meta data with 26 key-value pairs and 563 tensors from /home/WUR/tamas002/.ollama/models/blobs/sha256-630983e98a0c92b38850c213cb1d4a8a724635ccabf84fdf70f3fad6a862ce52 (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = llama
llama_model_loader: - kv 1: general.name str = mistralai
llama_model_loader: - kv 2: llama.vocab_size u32 = 32768
llama_model_loader: - kv 3: llama.context_length u32 = 65536
llama_model_loader: - kv 4: llama.embedding_length u32 = 6144
llama_model_loader: - kv 5: llama.block_count u32 = 56
llama_model_loader: - kv 6: llama.feed_forward_length u32 = 16384
llama_model_loader: - kv 7: llama.rope.dimension_count u32 = 128
llama_model_loader: - kv 8: llama.attention.head_count u32 = 48
llama_model_loader: - kv 9: llama.attention.head_count_kv u32 = 8
llama_model_loader: - kv 10: llama.expert_count u32 = 8
llama_model_loader: - kv 11: llama.expert_used_count u32 = 2
llama_model_loader: - kv 12: llama.attention.layer_norm_rms_epsilon f32 = 0.000010
llama_model_loader: - kv 13: llama.rope.freq_base f32 = 1000000.000000
llama_model_loader: - kv 14: general.file_type u32 = 17
llama_model_loader: - kv 15: tokenizer.ggml.model str = llama
llama_model_loader: - kv 16: tokenizer.ggml.tokens arr[str,32768] = ["<unk>", "<s>", "</s>", "[INST]", "[...
⠏ llama_model_loader: - kv 17: tokenizer.ggml.scores arr[f32,32768] = [-1000.000000, -1000.000000,-1000.00...
llama_model_loader: - kv 18: tokenizer.ggml.token_type arr[i32,32768] = [3, 3, 3, 1, 1, 1, 1, 1, 1, 1,1, 1, ...
llama_model_loader: - kv 19: tokenizer.ggml.bos_token_id u32 = 1
llama_model_loader: - kv 20: tokenizer.ggml.eos_token_id u32 = 2
llama_model_loader: - kv 21: tokenizer.ggml.unknown_token_id u32 = 0
llama_model_loader: - kv 22: tokenizer.ggml.add_bos_token bool = false
llama_model_loader: - kv 23: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 24: tokenizer.chat_template str = {{bos_token}}{% for message inmessag...
llama_model_loader: - kv 25: general.quantization_version u32 = 2
llama_model_loader: - type f32: 113 tensors
llama_model_loader: - type f16: 56 tensors
llama_model_loader: - type q8_0: 112 tensors
llama_model_loader: - type q5_K: 253 tensors
llama_model_loader: - type q6_K: 29 tensors
llm_load_vocab: mismatch in special tokens definition ( 1027/32768 vs 259/32768 ).
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = llama
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 32768
llm_load_print_meta: n_merges = 0
llm_load_print_meta: n_ctx_train = 65536
llm_load_print_meta: n_embd = 6144
llm_load_print_meta: n_head = 48
llm_load_print_meta: n_head_kv = 8
llm_load_print_meta: n_layer = 56
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 6
llm_load_print_meta: n_embd_k_gqa = 1024
llm_load_print_meta: n_embd_v_gqa = 1024
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-05
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 16384
llm_load_print_meta: n_expert = 8
llm_load_print_meta: n_expert_used = 2
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 0
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 1000000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_yarn_orig_ctx = 65536
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: model type = 8x22B
llm_load_print_meta: model ftype = Q5_K - Medium
llm_load_print_meta: model params = 140.63 B
llm_load_print_meta: model size = 93.11 GiB (5.69 BPW)
llm_load_print_meta: general.name = mistralai
llm_load_print_meta: BOS token = 1 '<s>'
llm_load_print_meta: EOS token = 2 '</s>'
llm_load_print_meta: UNK token = 0 '<unk>'
llm_load_print_meta: LF token = 781 '<0x0A>'
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: yes
ggml_cuda_init: CUDA_USE_TENSOR_CORES: no
ggml_cuda_init: found 1 CUDA devices:
Device 0: NVIDIA A100 80GB PCIe, compute capability 8.0, VMM: yes
⠋ llm_load_tensors: ggml ctx size = 0.77 MiB
⠸ llm_load_tensors: offloading 47 repeating layers to GPU
llm_load_tensors: offloaded 47/57 layers to GPU
llm_load_tensors: CPU buffer size = 18753.40 MiB
llm_load_tensors: CUDA0 buffer size = 79522.36 MiB
⠹ .
llama_new_context_with_model: n_ctx = 2048
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: freq_base = 1000000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA_Host KV buffer size = 72.00 MiB
llama_kv_cache_init: CUDA0 KV buffer size = 376.00 MiB
llama_new_context_with_model: KV self size = 448.00 MiB, K (f16): 224.00 MiB, V (f16): 224.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 0.15 MiB
ggml_backend_cuda_buffer_type_alloc_buffer: allocating 1766.75 MiB on device 0: cudaMalloc failed: out of memory
ggml_gallocr_reserve_n: failed to allocate CUDA0 buffer of size 1852573696
llama_new_context_with_model: failed to allocate compute buffers
llama_init_from_gpt_params: error: failed to create context with model '/home/WUR/tamas002/.ollama/models/blobs/sha256-630983e98a0c92b38850c213cb1d4a8a724635ccabf84fdf70f3fad6a862ce52'
⠴ {"function":"load_model","level":"ERR","line":410,"model":"/home/WUR/tamas002/.ollama/models/blobs/sha256-630983e98a0c92b38850c213cb1d4a8a724635ccabf84fdf70f3fad6a862ce52","msg":"unable to load model","tid":"140660910362624","timestamp":1713647871}
⠧ time=2024-04-20T23:17:51.849+02:00 level=ERROR source=routes.go:120 msg="error loading llama server" error="llama runnerprocess no longer running: 1 error:failed to create context with model '/home/WUR/tamas002/.ollama/models/blobs/sha256-630983e98a0c92b38850c213cb1d4a8a724635ccabf84fdf70f3fad6a862ce52'"
[GIN] 2024/04/20 - 23:17:51 | 500 | 11.732787815s | 127.0.0.1 | POST "/api/chat"
Error: llama runner process no longer running: 1 error:failed to create context with model '/home/WUR/tamas002/.ollama/models/blobs/sha256-630983e98a0c92b38850c213cb1d4a8a724635ccabf84fdf70f3fad6a862ce52'
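The log shows the weights themselves fit (47/57 layers offloaded), but the ~1.8 GiB CUDA compute buffer then fails to allocate. A minimal sketch of a possible workaround, assuming that offloading fewer layers leaves enough headroom (40 is an illustrative guess, not a tested value):

```shell
# Hedged sketch: cap offloaded layers below the scheduler's estimate so the
# CUDA compute buffer still fits in VRAM. num_gpu=40 is an assumption.
curl http://localhost:11434/api/generate -d '{
  "model": "mixtral:8x22b-instruct-v0.1-q5_K_M",
  "prompt": "Hello",
  "options": { "num_gpu": 40 }
}'
```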
### OS
Linux
### GPU
Nvidia
### CPU
Intel
### Ollama version
0.1.32 @ 20 April | {
"login": "pdevine",
"id": 75239,
"node_id": "MDQ6VXNlcjc1MjM5",
"avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/pdevine",
"html_url": "https://github.com/pdevine",
"followers_url": "https://api.github.com/users/pdevine/followers",
"following_url": "https://api.github.com/users/pdevine/following{/other_user}",
"gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}",
"starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/pdevine/subscriptions",
"organizations_url": "https://api.github.com/users/pdevine/orgs",
"repos_url": "https://api.github.com/users/pdevine/repos",
"events_url": "https://api.github.com/users/pdevine/events{/privacy}",
"received_events_url": "https://api.github.com/users/pdevine/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/3787/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/3787/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/996 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/996/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/996/comments | https://api.github.com/repos/ollama/ollama/issues/996/events | https://github.com/ollama/ollama/pull/996 | 1,977,265,901 | PR_kwDOJ0Z1Ps5el7il | 996 | Add gen.nvim as community contribution | {
"login": "David-Kunz",
"id": 1009936,
"node_id": "MDQ6VXNlcjEwMDk5MzY=",
"avatar_url": "https://avatars.githubusercontent.com/u/1009936?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/David-Kunz",
"html_url": "https://github.com/David-Kunz",
"followers_url": "https://api.github.com/users/David-Kunz/followers",
"following_url": "https://api.github.com/users/David-Kunz/following{/other_user}",
"gists_url": "https://api.github.com/users/David-Kunz/gists{/gist_id}",
"starred_url": "https://api.github.com/users/David-Kunz/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/David-Kunz/subscriptions",
"organizations_url": "https://api.github.com/users/David-Kunz/orgs",
"repos_url": "https://api.github.com/users/David-Kunz/repos",
"events_url": "https://api.github.com/users/David-Kunz/events{/privacy}",
"received_events_url": "https://api.github.com/users/David-Kunz/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2023-11-04T10:08:26 | 2023-11-06T18:51:41 | 2023-11-06T18:51:41 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/996",
"html_url": "https://github.com/ollama/ollama/pull/996",
"diff_url": "https://github.com/ollama/ollama/pull/996.diff",
"patch_url": "https://github.com/ollama/ollama/pull/996.patch",
"merged_at": "2023-11-06T18:51:41"
} | Hi,
[gen.nvim](https://github.com/David-Kunz/gen.nvim) is a Neovim extension from which you can invoke Ollama.
Best regards,
David | {
"login": "BruceMacD",
"id": 5853428,
"node_id": "MDQ6VXNlcjU4NTM0Mjg=",
"avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/BruceMacD",
"html_url": "https://github.com/BruceMacD",
"followers_url": "https://api.github.com/users/BruceMacD/followers",
"following_url": "https://api.github.com/users/BruceMacD/following{/other_user}",
"gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}",
"starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions",
"organizations_url": "https://api.github.com/users/BruceMacD/orgs",
"repos_url": "https://api.github.com/users/BruceMacD/repos",
"events_url": "https://api.github.com/users/BruceMacD/events{/privacy}",
"received_events_url": "https://api.github.com/users/BruceMacD/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/996/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 1,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/996/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/7558 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/7558/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/7558/comments | https://api.github.com/repos/ollama/ollama/issues/7558/events | https://github.com/ollama/ollama/issues/7558 | 2,641,293,364 | I_kwDOJ0Z1Ps6dbvA0 | 7,558 | llama3.2-vision crash on multiple cuda GPUs - unspecified launch failure | {
"login": "HuronExplodium",
"id": 124458994,
"node_id": "U_kgDOB2sX8g",
"avatar_url": "https://avatars.githubusercontent.com/u/124458994?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/HuronExplodium",
"html_url": "https://github.com/HuronExplodium",
"followers_url": "https://api.github.com/users/HuronExplodium/followers",
"following_url": "https://api.github.com/users/HuronExplodium/following{/other_user}",
"gists_url": "https://api.github.com/users/HuronExplodium/gists{/gist_id}",
"starred_url": "https://api.github.com/users/HuronExplodium/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/HuronExplodium/subscriptions",
"organizations_url": "https://api.github.com/users/HuronExplodium/orgs",
"repos_url": "https://api.github.com/users/HuronExplodium/repos",
"events_url": "https://api.github.com/users/HuronExplodium/events{/privacy}",
"received_events_url": "https://api.github.com/users/HuronExplodium/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
},
{
"id": 5755339642,
"node_id": "LA_kwDOJ0Z1Ps8AAAABVwuDeg",
"url": "https://api.github.com/repos/ollama/ollama/labels/linux",
"name": "linux",
"color": "516E70",
"default": false,
"description": ""
},
{
"id": 6430601766,
"node_id": "LA_kwDOJ0Z1Ps8AAAABf0syJg",
"url": "https://api.github.com/repos/ollama/ollama/labels/nvidia",
"name": "nvidia",
"color": "8CDB00",
"default": false,
"description": "Issues relating to Nvidia GPUs and CUDA"
}
] | closed | false | {
"login": "mxyng",
"id": 2372640,
"node_id": "MDQ6VXNlcjIzNzI2NDA=",
"avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mxyng",
"html_url": "https://github.com/mxyng",
"followers_url": "https://api.github.com/users/mxyng/followers",
"following_url": "https://api.github.com/users/mxyng/following{/other_user}",
"gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mxyng/subscriptions",
"organizations_url": "https://api.github.com/users/mxyng/orgs",
"repos_url": "https://api.github.com/users/mxyng/repos",
"events_url": "https://api.github.com/users/mxyng/events{/privacy}",
"received_events_url": "https://api.github.com/users/mxyng/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "mxyng",
"id": 2372640,
"node_id": "MDQ6VXNlcjIzNzI2NDA=",
"avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mxyng",
"html_url": "https://github.com/mxyng",
"followers_url": "https://api.github.com/users/mxyng/followers",
"following_url": "https://api.github.com/users/mxyng/following{/other_user}",
"gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mxyng/subscriptions",
"organizations_url": "https://api.github.com/users/mxyng/orgs",
"repos_url": "https://api.github.com/users/mxyng/repos",
"events_url": "https://api.github.com/users/mxyng/events{/privacy}",
"received_events_url": "https://api.github.com/users/mxyng/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 13 | 2024-11-07T15:00:28 | 2024-11-14T17:40:05 | 2024-11-14T17:40:05 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
Running llama3.2-vision:11b works with text and images.
Running llama3.2-vision:90b works with text, but segfaults on images.
Running llava works with text and images.
Debug log from the segfault with text and an image:
mllama_model_load: description: vision encoder for Mllama
mllama_model_load: GGUF version: 3
mllama_model_load: alignment: 32
mllama_model_load: n_tensors: 512
mllama_model_load: n_kv: 17
mllama_model_load: ftype: f16
mllama_model_load:
mllama_model_load: vision using CUDA backend
time=2024-11-07T04:46:42.696Z level=DEBUG source=server.go:615 msg="model load completed, waiting for server to become available" status="llm server loading model"
mllama_model_load: compute allocated memory: 2853.34 MB
time=2024-11-07T04:46:43.199Z level=INFO source=server.go:606 msg="llama runner started in 23.62 seconds"
time=2024-11-07T04:46:43.199Z level=DEBUG source=sched.go:462 msg="finished setting up runner" model=/home/user/.ollama/models/blobs/sha256-da63a910e34997d50c9f21cc7f16996d1e76e1c128b13319edd68348f760ecc7
time=2024-11-07T04:46:43.452Z level=DEBUG source=routes.go:1457 msg="chat request" images=1 prompt="<|start_header_id|>user<|end_header_id|>\n\n[img-0]<|image|>this is a random frame. describe in detail everything you can interpret. Ideally keep your response concise and information dense since this will be read in a chat room. ANYTHING OVER 200 CHARACTERS WILL BE CUT OFF.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
time=2024-11-07T04:46:46.139Z level=DEBUG source=image.go:175 msg="storing image embeddings in cache" entry=0 used=0001-01-01T00:00:00.000Z
time=2024-11-07T04:46:46.139Z level=DEBUG source=cache.go:99 msg="loading cache slot" id=0 cache=0 prompt=61 used=0 remaining=61
CUDA error: unspecified launch failure
current device: 3, in function ggml_backend_cuda_synchronize at ggml-cuda.cu:2508
cudaStreamSynchronize(cuda_ctx->stream())
ggml-cuda.cu:132: CUDA error
SIGSEGV: segmentation violation
PC=0x72e7ad3ecc57 m=7 sigcode=1 addr=0x213203fcc
signal arrived during cgo execution
goroutine 7 gp=0xc00029c000 m=7 mp=0xc000302008 [syscall]:
runtime.cgocall(0x60e54306eeb0, 0xc000185b60)
runtime/cgocall.go:157 +0x4b fp=0xc000185b38 sp=0xc000185b00 pc=0x60e542df13cb
github.com/ollama/ollama/llama._Cfunc_llama_decode(0x72e7180064a0, {0x37, 0x72e7183ae990, 0x0, 0x0, 0x72e7183af1a0, 0x72e7183af9b0, 0x72e7183b01c0, 0x72d1281ef3e0, 0x0, ...})
_cgo_gotypes.go:543 +0x52 fp=0xc000185b60 sp=0xc000185b38 pc=0x60e542eee952
github.com/ollama/ollama/llama.(*Context).Decode.func1(0x60e54306aceb?, 0x72e7180064a0?)
github.com/ollama/ollama/llama/llama.go:167 +0xd8 fp=0xc000185c80 sp=0xc000185b60 pc=0x60e542ef0e78
github.com/ollama/ollama/llama.(*Context).Decode(0xc0001ec140?, 0x1?)
github.com/ollama/ollama/llama/llama.go:167 +0x17 fp=0xc000185cc8 sp=0xc000185c80 pc=0x60e542ef0cd7
main.(*Server).processBatch(0xc0001d0120, 0xc000234000, 0xc000234070)
github.com/ollama/ollama/llama/runner/runner.go:424 +0x29e fp=0xc000185ed0 sp=0xc000185cc8 pc=0x60e543069d1e
main.(*Server).run(0xc0001d0120, {0x60e5433a8a40, 0xc0001a60a0})
github.com/ollama/ollama/llama/runner/runner.go:338 +0x1a5 fp=0xc000185fb8 sp=0xc000185ed0 pc=0x60e543069705
main.main.gowrap2()
github.com/ollama/ollama/llama/runner/runner.go:907 +0x28 fp=0xc000185fe0 sp=0xc000185fb8 pc=0x60e54306dee8
runtime.goexit({})
runtime/asm_amd64.s:1695 +0x1 fp=0xc000185fe8 sp=0xc000185fe0 pc=0x60e542e59de1
created by main.main in goroutine 1
github.com/ollama/ollama/llama/runner/runner.go:907 +0xcab
goroutine 1 gp=0xc0000061c0 m=nil [IO wait]:
runtime.gopark(0xc000034008?, 0x0?, 0xc0?, 0x61?, 0xc00002d8c0?)
runtime/proc.go:402 +0xce fp=0xc0001f5888 sp=0xc0001f5868 pc=0x60e542e2800e
runtime.netpollblock(0xc00002d920?, 0x42df0b26?, 0xe5?)
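Since the failure is in `ggml_backend_cuda_synchronize` on device 3, one isolation step worth trying is pinning the server to a single GPU. This is a debugging sketch, not a fix, and the 90B model may then no longer fit in VRAM:

```shell
# Hedged isolation step: restrict Ollama to one GPU to rule out the
# multi-GPU split. CUDA_VISIBLE_DEVICES is a standard NVIDIA variable;
# adjust the index for your system.
CUDA_VISIBLE_DEVICES=0 ollama serve
```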
### OS
_No response_
### GPU
Nvidia
### CPU
_No response_
### Ollama version
0.4 | {
"login": "mxyng",
"id": 2372640,
"node_id": "MDQ6VXNlcjIzNzI2NDA=",
"avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mxyng",
"html_url": "https://github.com/mxyng",
"followers_url": "https://api.github.com/users/mxyng/followers",
"following_url": "https://api.github.com/users/mxyng/following{/other_user}",
"gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mxyng/subscriptions",
"organizations_url": "https://api.github.com/users/mxyng/orgs",
"repos_url": "https://api.github.com/users/mxyng/repos",
"events_url": "https://api.github.com/users/mxyng/events{/privacy}",
"received_events_url": "https://api.github.com/users/mxyng/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/7558/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/7558/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/7981 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/7981/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/7981/comments | https://api.github.com/repos/ollama/ollama/issues/7981/events | https://github.com/ollama/ollama/issues/7981 | 2,724,269,697 | I_kwDOJ0Z1Ps6iYQ6B | 7,981 | Internet Access To The Model | {
"login": "dragonked2",
"id": 66541902,
"node_id": "MDQ6VXNlcjY2NTQxOTAy",
"avatar_url": "https://avatars.githubusercontent.com/u/66541902?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dragonked2",
"html_url": "https://github.com/dragonked2",
"followers_url": "https://api.github.com/users/dragonked2/followers",
"following_url": "https://api.github.com/users/dragonked2/following{/other_user}",
"gists_url": "https://api.github.com/users/dragonked2/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dragonked2/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dragonked2/subscriptions",
"organizations_url": "https://api.github.com/users/dragonked2/orgs",
"repos_url": "https://api.github.com/users/dragonked2/repos",
"events_url": "https://api.github.com/users/dragonked2/events{/privacy}",
"received_events_url": "https://api.github.com/users/dragonked2/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396200,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request",
"name": "feature request",
"color": "a2eeef",
"default": false,
"description": "New feature or request"
}
] | closed | false | null | [] | null | 2 | 2024-12-07T02:22:24 | 2024-12-20T22:17:02 | 2024-12-20T22:17:01 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | I request adding internet access to the model so it can use a browser and crawl the required data. | {
"login": "pdevine",
"id": 75239,
"node_id": "MDQ6VXNlcjc1MjM5",
"avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/pdevine",
"html_url": "https://github.com/pdevine",
"followers_url": "https://api.github.com/users/pdevine/followers",
"following_url": "https://api.github.com/users/pdevine/following{/other_user}",
"gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}",
"starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/pdevine/subscriptions",
"organizations_url": "https://api.github.com/users/pdevine/orgs",
"repos_url": "https://api.github.com/users/pdevine/repos",
"events_url": "https://api.github.com/users/pdevine/events{/privacy}",
"received_events_url": "https://api.github.com/users/pdevine/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/7981/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/7981/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/3345 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/3345/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/3345/comments | https://api.github.com/repos/ollama/ollama/issues/3345/events | https://github.com/ollama/ollama/pull/3345 | 2,206,483,529 | PR_kwDOJ0Z1Ps5qsnvd | 3,345 | [wip] adds a welcome message to the interactive mode | {
"login": "xbasset",
"id": 8493278,
"node_id": "MDQ6VXNlcjg0OTMyNzg=",
"avatar_url": "https://avatars.githubusercontent.com/u/8493278?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/xbasset",
"html_url": "https://github.com/xbasset",
"followers_url": "https://api.github.com/users/xbasset/followers",
"following_url": "https://api.github.com/users/xbasset/following{/other_user}",
"gists_url": "https://api.github.com/users/xbasset/gists{/gist_id}",
"starred_url": "https://api.github.com/users/xbasset/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/xbasset/subscriptions",
"organizations_url": "https://api.github.com/users/xbasset/orgs",
"repos_url": "https://api.github.com/users/xbasset/repos",
"events_url": "https://api.github.com/users/xbasset/events{/privacy}",
"received_events_url": "https://api.github.com/users/xbasset/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2024-03-25T19:05:11 | 2024-03-27T16:15:41 | 2024-03-27T16:15:31 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | true | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/3345",
"html_url": "https://github.com/ollama/ollama/pull/3345",
"diff_url": "https://github.com/ollama/ollama/pull/3345.diff",
"patch_url": "https://github.com/ollama/ollama/pull/3345.patch",
"merged_at": null
Suggestion to add a welcome message to give clarity on the model / version of the model currently used in interactive mode
Following that conversation on twitter
https://x.com/xbasset/status/1771934995738706322?s=20 | {
"login": "xbasset",
"id": 8493278,
"node_id": "MDQ6VXNlcjg0OTMyNzg=",
"avatar_url": "https://avatars.githubusercontent.com/u/8493278?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/xbasset",
"html_url": "https://github.com/xbasset",
"followers_url": "https://api.github.com/users/xbasset/followers",
"following_url": "https://api.github.com/users/xbasset/following{/other_user}",
"gists_url": "https://api.github.com/users/xbasset/gists{/gist_id}",
"starred_url": "https://api.github.com/users/xbasset/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/xbasset/subscriptions",
"organizations_url": "https://api.github.com/users/xbasset/orgs",
"repos_url": "https://api.github.com/users/xbasset/repos",
"events_url": "https://api.github.com/users/xbasset/events{/privacy}",
"received_events_url": "https://api.github.com/users/xbasset/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/3345/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/3345/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/7062 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/7062/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/7062/comments | https://api.github.com/repos/ollama/ollama/issues/7062/events | https://github.com/ollama/ollama/issues/7062 | 2,559,321,173 | I_kwDOJ0Z1Ps6YjCRV | 7,062 | Mistral Pixtral 12B | {
"login": "RajbirSehrawat",
"id": 18544802,
"node_id": "MDQ6VXNlcjE4NTQ0ODAy",
"avatar_url": "https://avatars.githubusercontent.com/u/18544802?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/RajbirSehrawat",
"html_url": "https://github.com/RajbirSehrawat",
"followers_url": "https://api.github.com/users/RajbirSehrawat/followers",
"following_url": "https://api.github.com/users/RajbirSehrawat/following{/other_user}",
"gists_url": "https://api.github.com/users/RajbirSehrawat/gists{/gist_id}",
"starred_url": "https://api.github.com/users/RajbirSehrawat/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/RajbirSehrawat/subscriptions",
"organizations_url": "https://api.github.com/users/RajbirSehrawat/orgs",
"repos_url": "https://api.github.com/users/RajbirSehrawat/repos",
"events_url": "https://api.github.com/users/RajbirSehrawat/events{/privacy}",
"received_events_url": "https://api.github.com/users/RajbirSehrawat/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5789807732,
"node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA",
"url": "https://api.github.com/repos/ollama/ollama/labels/model%20request",
"name": "model request",
"color": "1E5DE6",
"default": false,
"description": "Model requests"
}
] | closed | false | null | [] | null | 2 | 2024-10-01T13:51:40 | 2024-10-03T16:58:44 | 2024-10-03T16:58:44 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | Can you please add Pixtral 12B in the list, while i am trying to install not able to use this model. | {
"login": "pdevine",
"id": 75239,
"node_id": "MDQ6VXNlcjc1MjM5",
"avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/pdevine",
"html_url": "https://github.com/pdevine",
"followers_url": "https://api.github.com/users/pdevine/followers",
"following_url": "https://api.github.com/users/pdevine/following{/other_user}",
"gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}",
"starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/pdevine/subscriptions",
"organizations_url": "https://api.github.com/users/pdevine/orgs",
"repos_url": "https://api.github.com/users/pdevine/repos",
"events_url": "https://api.github.com/users/pdevine/events{/privacy}",
"received_events_url": "https://api.github.com/users/pdevine/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/7062/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/7062/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/8218 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/8218/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/8218/comments | https://api.github.com/repos/ollama/ollama/issues/8218/events | https://github.com/ollama/ollama/issues/8218 | 2,756,025,838 | I_kwDOJ0Z1Ps6kRZ3u | 8,218 | Question: Commercial Usage License Confirmation and Data Collection Clarification | {
"login": "ttamoud",
"id": 57901415,
"node_id": "MDQ6VXNlcjU3OTAxNDE1",
"avatar_url": "https://avatars.githubusercontent.com/u/57901415?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ttamoud",
"html_url": "https://github.com/ttamoud",
"followers_url": "https://api.github.com/users/ttamoud/followers",
"following_url": "https://api.github.com/users/ttamoud/following{/other_user}",
"gists_url": "https://api.github.com/users/ttamoud/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ttamoud/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ttamoud/subscriptions",
"organizations_url": "https://api.github.com/users/ttamoud/orgs",
"repos_url": "https://api.github.com/users/ttamoud/repos",
"events_url": "https://api.github.com/users/ttamoud/events{/privacy}",
"received_events_url": "https://api.github.com/users/ttamoud/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | open | false | null | [] | null | 0 | 2024-12-23T12:58:03 | 2024-12-23T12:58:03 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | First, I want to express my sincere appreciation for Ollama. I've been using it daily in my development workflow, and it has significantly improved my productivity. The speed and ease of use are remarkable, and I'm constantly impressed by the ongoing improvements.
Details:
As a happy user looking to expand usage, I have two specific questions regarding enterprise implementation:
1. License Compliance: While I understand Ollama is under the MIT license, I'd like explicit confirmation about commercial usage within a company setting. We want to ensure we're fully compliant with all licensing requirements.
2. Data Collection: Could you please clarify whether the Ollama software itself includes any form of data collection or telemetry? To be specific, I'm not asking about API calls to external LLM providers, but rather about any data collection built into Ollama itself.
Thank you again for this fantastic tool. Your clarification would help us move forward with broader implementation plans. | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/8218/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/8218/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/4964 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/4964/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/4964/comments | https://api.github.com/repos/ollama/ollama/issues/4964/events | https://github.com/ollama/ollama/issues/4964 | 2,344,458,655 | I_kwDOJ0Z1Ps6LvZmf | 4,964 | ollama run qwen2:72b-instruct-q2_K but Error: llama runner process has terminated: signal: aborted (core dumped) | {
"login": "mikestut",
"id": 88723510,
"node_id": "MDQ6VXNlcjg4NzIzNTEw",
"avatar_url": "https://avatars.githubusercontent.com/u/88723510?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mikestut",
"html_url": "https://github.com/mikestut",
"followers_url": "https://api.github.com/users/mikestut/followers",
"following_url": "https://api.github.com/users/mikestut/following{/other_user}",
"gists_url": "https://api.github.com/users/mikestut/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mikestut/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mikestut/subscriptions",
"organizations_url": "https://api.github.com/users/mikestut/orgs",
"repos_url": "https://api.github.com/users/mikestut/repos",
"events_url": "https://api.github.com/users/mikestut/events{/privacy}",
"received_events_url": "https://api.github.com/users/mikestut/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | open | false | null | [] | null | 5 | 2024-06-10T17:24:32 | 2024-07-01T08:55:43 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
6月 11 01:17:54 Venue-vPro ollama[2760]: time=2024-06-11T01:17:54.332+08:00 level=INFO source=server.go:567 msg="waiting for server to become available" status="ll>
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_vocab: special tokens cache size = 421
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_vocab: token to piece cache size = 1.8703 MB
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: format = GGUF V3 (latest)
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: arch = qwen2
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: vocab type = BPE
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_vocab = 152064
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_merges = 151387
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_ctx_train = 32768
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_embd = 8192
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_head = 64
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_head_kv = 8
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_layer = 80
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_rot = 128
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_embd_head_k = 128
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_embd_head_v = 128
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_gqa = 8
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_embd_k_gqa = 1024
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_embd_v_gqa = 1024
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: f_norm_eps = 0.0e+00
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: f_norm_rms_eps = 1.0e-06
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: f_clamp_kqv = 0.0e+00
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: f_max_alibi_bias = 0.0e+00
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: f_logit_scale = 0.0e+00
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_ff = 29568
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_expert = 0
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_expert_used = 0
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: causal attn = 1
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: pooling type = 0
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: rope type = 2
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: rope scaling = linear
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: freq_base_train = 1000000.0
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: freq_scale_train = 1
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: n_yarn_orig_ctx = 32768
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: rope_finetuned = unknown
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: ssm_d_conv = 0
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: ssm_d_inner = 0
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: ssm_d_state = 0
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: ssm_dt_rank = 0
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: model type = 70B
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: model ftype = Q2_K - Medium
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: model params = 72.71 B
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: model size = 27.76 GiB (3.28 BPW)
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: general.name = Qwen2-72B-Instruct
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: BOS token = 151643 '<|endoftext|>'
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: EOS token = 151645 '<|im_end|>'
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: PAD token = 151643 '<|endoftext|>'
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: LF token = 148848 'ÄĬ'
6月 11 01:17:54 Venue-vPro ollama[2760]: llm_load_print_meta: EOT token = 151645 '<|im_end|>'
6月 11 01:17:54 Venue-vPro ollama[2760]: ggml_cuda_init: GGML_CUDA_FORCE_MMQ: yes
6月 11 01:17:54 Venue-vPro ollama[2760]: ggml_cuda_init: CUDA_USE_TENSOR_CORES: no
6月 11 01:17:54 Venue-vPro ollama[2760]: ggml_cuda_init: found 2 CUDA devices:
6月 11 01:17:54 Venue-vPro ollama[2760]: Device 0: Tesla M40 24GB, compute capability 5.2, VMM: yes
6月 11 01:17:54 Venue-vPro ollama[2760]: Device 1: Tesla M40 24GB, compute capability 5.2, VMM: yes
6月 11 01:17:55 Venue-vPro ollama[2760]: llm_load_tensors: ggml ctx size = 1.38 MiB
6月 11 01:17:55 Venue-vPro ollama[2760]: time=2024-06-11T01:17:55.789+08:00 level=INFO source=server.go:567 msg="waiting for server to become available" status="ll>
6月 11 01:17:56 Venue-vPro ollama[2760]: time=2024-06-11T01:17:56.152+08:00 level=INFO source=server.go:567 msg="waiting for server to become available" status="ll>
6月 11 01:17:56 Venue-vPro ollama[2760]: llm_load_tensors: offloading 80 repeating layers to GPU
6月 11 01:17:56 Venue-vPro ollama[2760]: llm_load_tensors: offloading non-repeating layers to GPU
6月 11 01:17:56 Venue-vPro ollama[2760]: llm_load_tensors: offloaded 81/81 layers to GPU
6月 11 01:17:56 Venue-vPro ollama[2760]: llm_load_tensors: CPU buffer size = 389.81 MiB
6月 11 01:17:56 Venue-vPro ollama[2760]: llm_load_tensors: CUDA0 buffer size = 13868.58 MiB
6月 11 01:17:56 Venue-vPro ollama[2760]: llm_load_tensors: CUDA1 buffer size = 14166.62 MiB
6月 11 01:18:00 Venue-vPro ollama[2760]: time=2024-06-11T01:18:00.123+08:00 level=INFO source=server.go:567 msg="waiting for server to become available" status="ll>
6月 11 01:18:00 Venue-vPro ollama[2760]: llama_new_context_with_model: n_ctx = 2048
6月 11 01:18:00 Venue-vPro ollama[2760]: llama_new_context_with_model: n_batch = 512
6月 11 01:18:00 Venue-vPro ollama[2760]: llama_new_context_with_model: n_ubatch = 512
6月 11 01:18:00 Venue-vPro ollama[2760]: llama_new_context_with_model: flash_attn = 0
6月 11 01:18:00 Venue-vPro ollama[2760]: llama_new_context_with_model: freq_base = 1000000.0
6月 11 01:18:00 Venue-vPro ollama[2760]: llama_new_context_with_model: freq_scale = 1
6月 11 01:18:00 Venue-vPro ollama[2760]: llama_kv_cache_init: CUDA0 KV buffer size = 328.00 MiB
6月 11 01:18:00 Venue-vPro ollama[2760]: llama_kv_cache_init: CUDA1 KV buffer size = 312.00 MiB
6月 11 01:18:00 Venue-vPro ollama[2760]: llama_new_context_with_model: KV self size = 640.00 MiB, K (f16): 320.00 MiB, V (f16): 320.00 MiB
6月 11 01:18:00 Venue-vPro ollama[2760]: llama_new_context_with_model: CUDA_Host output buffer size = 0.61 MiB
6月 11 01:18:00 Venue-vPro ollama[2760]: llama_new_context_with_model: pipeline parallelism enabled (n_copies=4)
6月 11 01:18:00 Venue-vPro ollama[2760]: llama_new_context_with_model: CUDA0 compute buffer size = 400.01 MiB
6月 11 01:18:00 Venue-vPro ollama[2760]: llama_new_context_with_model: CUDA1 compute buffer size = 400.02 MiB
6月 11 01:18:00 Venue-vPro ollama[2760]: llama_new_context_with_model: CUDA_Host compute buffer size = 32.02 MiB
6月 11 01:18:00 Venue-vPro ollama[2760]: llama_new_context_with_model: graph nodes = 2806
6月 11 01:18:00 Venue-vPro ollama[2760]: llama_new_context_with_model: graph splits = 3
6月 11 01:18:00 Venue-vPro ollama[2760]: time=2024-06-11T01:18:00.374+08:00 level=INFO source=server.go:567 msg="waiting for server to become available" status="ll>
6月 11 01:18:04 Venue-vPro ollama[2760]: GGML_ASSERT: /go/src/github.com/ollama/ollama/llm/llama.cpp/ggml-cuda/dmmv.cu:653: false
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2814]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2815]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2816]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2817]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2818]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2819]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2820]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2821]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2822]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2823]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2824]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2825]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2826]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2827]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2828]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2829]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2830]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2831]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2832]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2833]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2834]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2835]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2836]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2837]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2838]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2839]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2840]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2841]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2842]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2843]
6月 11 01:18:04 Venue-vPro ollama[2849]: [New LWP 2844]
6月 11 01:18:04 Venue-vPro ollama[2849]: [Thread debugging using libthread_db enabled]
6月 11 01:18:04 Venue-vPro ollama[2849]: Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
6月 11 01:18:05 Venue-vPro ollama[2760]: time=2024-06-11T01:18:05.095+08:00 level=INFO source=server.go:567 msg="waiting for server to become available" status="ll>
6月 11 01:18:05 Venue-vPro ollama[2849]: 0x00007f4b23780c7f in __GI___wait4 (pid=2849, stat_loc=0x0, options=0, usage=0x0) at ../sysdeps/unix/sysv/linux/wait4.c:27
6月 11 01:18:05 Venue-vPro ollama[2760]: 27 ../sysdeps/unix/sysv/linux/wait4.c: No such file or directory.
6月 11 01:18:05 Venue-vPro ollama[2849]: #0 0x00007f4b23780c7f in __GI___wait4 (pid=2849, stat_loc=0x0, options=0, usage=0x0) at ../sysdeps/unix/sysv/linux/wait4.>
6月 11 01:18:05 Venue-vPro ollama[2849]: 27 in ../sysdeps/unix/sysv/linux/wait4.c
6月 11 01:18:05 Venue-vPro ollama[2849]: #1 0x00000000005febbb in ggml_print_backtrace ()
6月 11 01:18:05 Venue-vPro ollama[2849]: #2 0x00000000006b5dbc in ggml_cuda_op_dequantize_mul_mat_vec(ggml_backend_cuda_context&, ggml_tensor const*, ggml_tensor >
6月 11 01:18:05 Venue-vPro ollama[2849]: #3 0x000000000068356a in ggml_cuda_op_mul_mat(ggml_backend_cuda_context&, ggml_tensor const*, ggml_tensor const*, ggml_te>
6月 11 01:18:05 Venue-vPro ollama[2849]: #4 0x00000000006866db in ggml_backend_cuda_graph_compute(ggml_backend*, ggml_cgraph*) ()
6月 11 01:18:05 Venue-vPro ollama[2849]: #5 0x000000000064a42b in ggml_backend_sched_graph_compute_async ()
6月 11 01:18:05 Venue-vPro ollama[2849]: #6 0x000000000055c91f in llama_decode ()
6月 11 01:18:05 Venue-vPro ollama[2849]: #7 0x00000000004ffbe4 in llama_init_from_gpt_params(gpt_params&) ()
6月 11 01:18:05 Venue-vPro ollama[2849]: #8 0x00000000004a158d in llama_server_context::load_model(gpt_params const&) ()
6月 11 01:18:05 Venue-vPro ollama[2849]: #9 0x0000000000432ed6 in main ()
6月 11 01:18:05 Venue-vPro ollama[2849]: [Inferior 1 (process 2813) detached]
6月 11 01:18:05 Venue-vPro ollama[2760]: time=2024-06-11T01:18:05.395+08:00 level=INFO source=server.go:567 msg="waiting for server to become available" status="ll>
6月 11 01:18:05 Venue-vPro ollama[2760]: time=2024-06-11T01:18:05.646+08:00 level=ERROR source=sched.go:344 msg="error loading llama server" error="llama runner pr>
6月 11 01:18:05 Venue-vPro ollama[2760]: [GIN] 2024/06/11 - 01:18:05 | 500 | 13.241382363s | 127.0.0.1 | POST "/api/chat"
6月 11 01:18:10 Venue-vPro ollama[2760]: time=2024-06-11T01:18:10.885+08:00 level=WARN source=sched.go:511 msg="gpu VRAM usage didn't recover within timeout" secon>
6月 11 01:18:11 Venue-vPro ollama[2760]: time=2024-06-11T01:18:11.134+08:00 level=WARN source=sched.go:511 msg="gpu VRAM usage didn't recover within timeout" secon>
6月 11 01:18:11 Venue-vPro ollama[2760]: time=2024-06-11T01:18:11.385+08:00 level=WARN source=sched.go:511 msg="gpu VRAM usage didn't recover within timeout" secon>
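The assert fires in dmmv.cu (the dequantize-mul-mat-vec kernel), which suggests the Q2_K path may lack support for compute capability 5.2 (Tesla M40). A minimal sketch of one way to narrow it down, assuming a q4_0 tag exists for this model under the library's usual naming scheme:

```shell
# Hedged sketch: try a more widely supported quantization on the same GPUs.
# The exact tag is an assumption based on the library's naming conventions.
ollama pull qwen2:72b-instruct-q4_0
ollama run qwen2:72b-instruct-q4_0
```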
### OS
Linux
### GPU
Nvidia
### CPU
Intel
### Ollama version
0.1.42 | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/4964/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/4964/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/7449 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/7449/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/7449/comments | https://api.github.com/repos/ollama/ollama/issues/7449/events | https://github.com/ollama/ollama/issues/7449 | 2,627,008,382 | I_kwDOJ0Z1Ps6clPd- | 7,449 | Support for BGE-Multilingual-Gemma2 | {
"login": "JPC612",
"id": 177754485,
"node_id": "U_kgDOCphRdQ",
"avatar_url": "https://avatars.githubusercontent.com/u/177754485?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/JPC612",
"html_url": "https://github.com/JPC612",
"followers_url": "https://api.github.com/users/JPC612/followers",
"following_url": "https://api.github.com/users/JPC612/following{/other_user}",
"gists_url": "https://api.github.com/users/JPC612/gists{/gist_id}",
"starred_url": "https://api.github.com/users/JPC612/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/JPC612/subscriptions",
"organizations_url": "https://api.github.com/users/JPC612/orgs",
"repos_url": "https://api.github.com/users/JPC612/repos",
"events_url": "https://api.github.com/users/JPC612/events{/privacy}",
"received_events_url": "https://api.github.com/users/JPC612/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5789807732,
"node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA",
"url": "https://api.github.com/repos/ollama/ollama/labels/model%20request",
"name": "model request",
"color": "1E5DE6",
"default": false,
"description": "Model requests"
}
] | open | false | null | [] | null | 2 | 2024-10-31T14:30:22 | 2024-11-18T08:49:13 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | I would be very grateful for the support of the BGE-Multilingual-Gemma2, an LLM-based multilingual embedding model.
https://huggingface.co/BAAI/bge-multilingual-gemma2
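For reference, a minimal sketch of the expected usage once supported, assuming a hypothetical tag `bge-multilingual-gemma2`:

```shell
# Hedged sketch: the model tag is hypothetical; /api/embed is Ollama's
# embeddings endpoint.
curl http://localhost:11434/api/embed -d '{
  "model": "bge-multilingual-gemma2",
  "input": "Guten Morgen"
}'
```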
| null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/7449/reactions",
"total_count": 6,
"+1": 6,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/7449/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/3676 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/3676/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/3676/comments | https://api.github.com/repos/ollama/ollama/issues/3676/events | https://github.com/ollama/ollama/issues/3676 | 2,246,498,648 | I_kwDOJ0Z1Ps6F5tlY | 3,676 | grok ai support in ollama | {
"login": "olumolu",
"id": 162728301,
"node_id": "U_kgDOCbMJbQ",
"avatar_url": "https://avatars.githubusercontent.com/u/162728301?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/olumolu",
"html_url": "https://github.com/olumolu",
"followers_url": "https://api.github.com/users/olumolu/followers",
"following_url": "https://api.github.com/users/olumolu/following{/other_user}",
"gists_url": "https://api.github.com/users/olumolu/gists{/gist_id}",
"starred_url": "https://api.github.com/users/olumolu/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/olumolu/subscriptions",
"organizations_url": "https://api.github.com/users/olumolu/orgs",
"repos_url": "https://api.github.com/users/olumolu/repos",
"events_url": "https://api.github.com/users/olumolu/events{/privacy}",
"received_events_url": "https://api.github.com/users/olumolu/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2024-04-16T16:51:00 | 2024-04-16T20:41:23 | 2024-04-16T20:41:22 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What model would you like?
https://huggingface.co/xai-org/grok-1
| {
"login": "pdevine",
"id": 75239,
"node_id": "MDQ6VXNlcjc1MjM5",
"avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/pdevine",
"html_url": "https://github.com/pdevine",
"followers_url": "https://api.github.com/users/pdevine/followers",
"following_url": "https://api.github.com/users/pdevine/following{/other_user}",
"gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}",
"starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/pdevine/subscriptions",
"organizations_url": "https://api.github.com/users/pdevine/orgs",
"repos_url": "https://api.github.com/users/pdevine/repos",
"events_url": "https://api.github.com/users/pdevine/events{/privacy}",
"received_events_url": "https://api.github.com/users/pdevine/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/3676/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/3676/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/8532 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/8532/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/8532/comments | https://api.github.com/repos/ollama/ollama/issues/8532/events | https://github.com/ollama/ollama/issues/8532 | 2,803,786,232 | I_kwDOJ0Z1Ps6nHmH4 | 8,532 | ollama only using cpu even with gpu found | {
"login": "nyllewin",
"id": 22198088,
"node_id": "MDQ6VXNlcjIyMTk4MDg4",
"avatar_url": "https://avatars.githubusercontent.com/u/22198088?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/nyllewin",
"html_url": "https://github.com/nyllewin",
"followers_url": "https://api.github.com/users/nyllewin/followers",
"following_url": "https://api.github.com/users/nyllewin/following{/other_user}",
"gists_url": "https://api.github.com/users/nyllewin/gists{/gist_id}",
"starred_url": "https://api.github.com/users/nyllewin/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/nyllewin/subscriptions",
"organizations_url": "https://api.github.com/users/nyllewin/orgs",
"repos_url": "https://api.github.com/users/nyllewin/repos",
"events_url": "https://api.github.com/users/nyllewin/events{/privacy}",
"received_events_url": "https://api.github.com/users/nyllewin/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | open | false | null | [] | null | 8 | 2025-01-22T08:58:01 | 2025-01-29T12:57:01 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
Hello,
This has been reported at least twice in the past; I am reporting it a third time because something doesn't seem right.
Relevant issues:
https://github.com/ollama/ollama/issues/8485
https://github.com/ollama/ollama/issues/8467
Same error, and the same fix: simply reinstalling within the same session (no reboot, nothing else) magically fixes it.
Installing from the official Arch repos also results in the GPU not being used. Is there any way to fix that?
> 2025/01/22 09:42:17 routes.go:1187: INFO server config env="map[CUDA_VISIBLE_DEVICES: GPU_DEVICE_ORDINAL: HIP_VISIBLE_DEVICES: HSA_OVERRIDE_GFX_VERSION: HTTPS_PROXY: HTTP_PROXY: NO_PROXY: OLLAMA_DEBUG:false OLLAMA_FLASH_ATTENTION:false OLLAMA_GPU_OVERHEAD:0 OLLAMA_HOST:http://127.0.0.1:11434 OLLAMA_INTEL_GPU:false OLLAMA_KEEP_ALIVE:5m0s OLLAMA_KV_CACHE_TYPE: OLLAMA_LLM_LIBRARY: OLLAMA_LOAD_TIMEOUT:5m0s OLLAMA_MAX_LOADED_MODELS:0 OLLAMA_MAX_QUEUE:512 OLLAMA_MODELS:/home/nylle/.ollama/models OLLAMA_MULTIUSER_CACHE:false OLLAMA_NOHISTORY:false OLLAMA_NOPRUNE:false OLLAMA_NUM_PARALLEL:0 OLLAMA_ORIGINS:[http://localhost https://localhost http://localhost:* https://localhost:* http://127.0.0.1 https://127.0.0.1 http://127.0.0.1:* https://127.0.0.1:* http://0.0.0.0 https://0.0.0.0 http://0.0.0.0:* https://0.0.0.0:* app://* file://* tauri://* vscode-webview://*] OLLAMA_SCHED_SPREAD:false ROCR_VISIBLE_DEVICES: http_proxy: https_proxy: no_proxy:]"
> time=2025-01-22T09:42:17.713+01:00 level=INFO source=images.go:432 msg="total blobs: 30"
> time=2025-01-22T09:42:17.713+01:00 level=INFO source=images.go:439 msg="total unused blobs removed: 0"
> [GIN-debug] [WARNING] Creating an Engine instance with the Logger and Recovery middleware already attached.
>
> [GIN-debug] [WARNING] Running in "debug" mode. Switch to "release" mode in production.
> - using env: export GIN_MODE=release
> - using code: gin.SetMode(gin.ReleaseMode)
>
> [GIN-debug] POST /api/pull --> github.com/ollama/ollama/server.(*Server).PullHandler-fm (5 handlers)
> [GIN-debug] POST /api/generate --> github.com/ollama/ollama/server.(*Server).GenerateHandler-fm (5 handlers)
> [GIN-debug] POST /api/chat --> github.com/ollama/ollama/server.(*Server).ChatHandler-fm (5 handlers)
> [GIN-debug] POST /api/embed --> github.com/ollama/ollama/server.(*Server).EmbedHandler-fm (5 handlers)
> [GIN-debug] POST /api/embeddings --> github.com/ollama/ollama/server.(*Server).EmbeddingsHandler-fm (5 handlers)
> [GIN-debug] POST /api/create --> github.com/ollama/ollama/server.(*Server).CreateHandler-fm (5 handlers)
> [GIN-debug] POST /api/push --> github.com/ollama/ollama/server.(*Server).PushHandler-fm (5 handlers)
> [GIN-debug] POST /api/copy --> github.com/ollama/ollama/server.(*Server).CopyHandler-fm (5 handlers)
> [GIN-debug] DELETE /api/delete --> github.com/ollama/ollama/server.(*Server).DeleteHandler-fm (5 handlers)
> [GIN-debug] POST /api/show --> github.com/ollama/ollama/server.(*Server).ShowHandler-fm (5 handlers)
> [GIN-debug] POST /api/blobs/:digest --> github.com/ollama/ollama/server.(*Server).CreateBlobHandler-fm (5 handlers)
> [GIN-debug] HEAD /api/blobs/:digest --> github.com/ollama/ollama/server.(*Server).HeadBlobHandler-fm (5 handlers)
> [GIN-debug] GET /api/ps --> github.com/ollama/ollama/server.(*Server).PsHandler-fm (5 handlers)
> [GIN-debug] POST /v1/chat/completions --> github.com/ollama/ollama/server.(*Server).ChatHandler-fm (6 handlers)
> [GIN-debug] POST /v1/completions --> github.com/ollama/ollama/server.(*Server).GenerateHandler-fm (6 handlers)
> [GIN-debug] POST /v1/embeddings --> github.com/ollama/ollama/server.(*Server).EmbedHandler-fm (6 handlers)
> [GIN-debug] GET /v1/models --> github.com/ollama/ollama/server.(*Server).ListHandler-fm (6 handlers)
> [GIN-debug] GET /v1/models/:model --> github.com/ollama/ollama/server.(*Server).ShowHandler-fm (6 handlers)
> [GIN-debug] GET / --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func1 (5 handlers)
> [GIN-debug] GET /api/tags --> github.com/ollama/ollama/server.(*Server).ListHandler-fm (5 handlers)
> [GIN-debug] GET /api/version --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func2 (5 handlers)
> [GIN-debug] HEAD / --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func1 (5 handlers)
> [GIN-debug] HEAD /api/tags --> github.com/ollama/ollama/server.(*Server).ListHandler-fm (5 handlers)
> [GIN-debug] HEAD /api/version --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func2 (5 handlers)
> time=2025-01-22T09:42:17.714+01:00 level=INFO source=routes.go:1238 msg="Listening on 127.0.0.1:11434 (version 0.5.7)"
> time=2025-01-22T09:42:17.714+01:00 level=INFO source=routes.go:1267 msg="Dynamic LLM libraries" runners=[cpu]
> time=2025-01-22T09:42:17.715+01:00 level=INFO source=gpu.go:226 msg="looking for compatible GPUs"
> time=2025-01-22T09:42:17.991+01:00 level=INFO source=types.go:131 msg="inference compute" id=GPU-f16d0b56-1989-0c40-d33b-480f8247ae00 library=cuda variant=v12 compute=8.9 driver=12.7 name="NVIDIA GeForce RTX 4070" total="11.6 GiB" available="10.9 GiB"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 2.178715ms | 127.0.0.1 | GET "/api/tags"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 2.279349ms | 127.0.0.1 | GET "/api/tags"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 569.677µs | 127.0.0.1 | GET "/api/tags"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 641.407µs | 127.0.0.1 | GET "/api/tags"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 952.749µs | 127.0.0.1 | GET "/api/tags"
> [GIN] 2025/01/22 - 09:42:29 | 404 | 2.06483ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 404 | 3.234779ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 404 | 708.286µs | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 404 | 4.649411ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 404 | 507.699µs | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 15.227486ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 14.713616ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 16.811971ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 14.913503ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 16.80457ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 11.736522ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 14.276927ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 15.946584ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 16.378602ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 16.905593ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 19.674409ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 9.267617ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 18.489808ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 9.548855ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 14.015642ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 18.927918ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 10.819718ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 20.026506ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 20.160486ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 20.971805ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 22.430443ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 20.434382ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 23.104035ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 19.600649ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 12.181043ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 9.689575ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 11.553348ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 23.011692ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 24.279914ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 24.367486ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 21.774185ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 23.247884ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 17.658695ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 23.333246ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 19.259493ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 14.342186ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 14.479045ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 26.379969ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 12.077609ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 24.800855ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 17.678988ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 27.778669ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 28.465122ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 21.794806ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 33.221807ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 26.157838ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 6.808224ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 7.428048ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 7.468563ms | 127.0.0.1 | POST "/api/show"
> [GIN] 2025/01/22 - 09:42:29 | 200 | 7.715287ms | 127.0.0.1 | POST "/api/show"
> time=2025-01-22T09:42:34.091+01:00 level=INFO source=sched.go:714 msg="new model will fit in available VRAM in single GPU, loading" model=/home/nylle/.ollama/models/blobs/sha256-e04bbddd58d9290a89af21ef484ce1113ff34ef35e822e95e52ff1045bac17f5 gpu=GPU-f16d0b56-1989-0c40-d33b-480f8247ae00 parallel=1 available=11592138752 required="9.1 GiB"
> time=2025-01-22T09:42:34.212+01:00 level=INFO source=server.go:104 msg="system memory" total="31.3 GiB" free="26.3 GiB" free_swap="32.0 GiB"
> time=2025-01-22T09:42:34.213+01:00 level=INFO source=memory.go:356 msg="offload to cuda" layers.requested=-1 layers.model=33 layers.offload=33 layers.split="" memory.available="[10.8 GiB]" memory.gpu_overhead="0 B" memory.required.full="9.1 GiB" memory.required.partial="9.1 GiB" memory.required.kv="4.0 GiB" memory.required.allocations="[9.1 GiB]" memory.weights.total="7.7 GiB" memory.weights.repeating="7.6 GiB" memory.weights.nonrepeating="103.4 MiB" memory.graph.full="553.8 MiB" memory.graph.partial="673.3 MiB"
> time=2025-01-22T09:42:34.213+01:00 level=INFO source=server.go:376 msg="starting llama server" cmd="/usr/local/bin/ollama runner --model /home/nylle/.ollama/models/blobs/sha256-e04bbddd58d9290a89af21ef484ce1113ff34ef35e822e95e52ff1045bac17f5 --ctx-size 8096 --batch-size 512 --n-gpu-layers 33 --threads 16 --parallel 1 --port 42953"
> time=2025-01-22T09:42:34.213+01:00 level=INFO source=sched.go:449 msg="loaded runners" count=1
> time=2025-01-22T09:42:34.213+01:00 level=INFO source=server.go:555 msg="waiting for llama runner to start responding"
> time=2025-01-22T09:42:34.214+01:00 level=INFO source=server.go:589 msg="waiting for server to become available" status="llm server error"
> time=2025-01-22T09:42:34.223+01:00 level=INFO source=runner.go:936 msg="starting go runner"
> time=2025-01-22T09:42:34.223+01:00 level=INFO source=runner.go:937 msg=system info="CPU : LLAMAFILE = 1 | AARCH64_REPACK = 1 | CPU : LLAMAFILE = 1 | AARCH64_REPACK = 1 | cgo(gcc)" threads=16
> time=2025-01-22T09:42:34.223+01:00 level=INFO source=runner.go:995 msg="Server listening on 127.0.0.1:42953"
> llama_model_loader: loaded meta data with 25 key-value pairs and 291 tensors from /home/nylle/.ollama/models/blobs/sha256-e04bbddd58d9290a89af21ef484ce1113ff34ef35e822e95e52ff1045bac17f5 (version GGUF V3 (latest))
> llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
> llama_model_loader: - kv 0: general.architecture str = llama
> llama_model_loader: - kv 1: general.name str = deepseek-ai
> llama_model_loader: - kv 2: llama.context_length u32 = 16384
> llama_model_loader: - kv 3: llama.embedding_length u32 = 4096
> llama_model_loader: - kv 4: llama.block_count u32 = 32
> llama_model_loader: - kv 5: llama.feed_forward_length u32 = 11008
> llama_model_loader: - kv 6: llama.rope.dimension_count u32 = 128
> llama_model_loader: - kv 7: llama.attention.head_count u32 = 32
> llama_model_loader: - kv 8: llama.attention.head_count_kv u32 = 32
> llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000001
> llama_model_loader: - kv 10: llama.rope.freq_base f32 = 100000.000000
> llama_model_loader: - kv 11: llama.rope.scaling.type str = linear
> llama_model_loader: - kv 12: llama.rope.scaling.factor f32 = 4.000000
> llama_model_loader: - kv 13: general.file_type u32 = 3
> llama_model_loader: - kv 14: tokenizer.ggml.model str = gpt2
> llama_model_loader: - kv 15: tokenizer.ggml.tokens arr[str,32256] = ["!", "\"", "#", "$", "%", "&", "'", ...
> llama_model_loader: - kv 16: tokenizer.ggml.scores arr[f32,32256] = [0.000000, 0.000000, 0.000000, 0.0000...
> llama_model_loader: - kv 17: tokenizer.ggml.token_type arr[i32,32256] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
> llama_model_loader: - kv 18: tokenizer.ggml.merges arr[str,31757] = ["Ġ Ġ", "Ġ t", "Ġ a", "i n", "h e...
> llama_model_loader: - kv 19: tokenizer.ggml.bos_token_id u32 = 32013
> llama_model_loader: - kv 20: tokenizer.ggml.eos_token_id u32 = 32014
> llama_model_loader: - kv 21: tokenizer.ggml.padding_token_id u32 = 32014
> llama_model_loader: - kv 22: tokenizer.ggml.add_bos_token bool = true
> llama_model_loader: - kv 23: tokenizer.ggml.add_eos_token bool = false
> llama_model_loader: - kv 24: general.quantization_version u32 = 2
> llama_model_loader: - type f32: 65 tensors
> llama_model_loader: - type q4_1: 225 tensors
> llama_model_loader: - type q6_K: 1 tensors
> llm_load_vocab: missing or unrecognized pre-tokenizer type, using: 'default'
> llm_load_vocab: control-looking token: 32015 '<|fim▁hole|>' was not control-type; this is probably a bug in the model. its type will be overridden
> llm_load_vocab: control-looking token: 32017 '<|fim▁end|>' was not control-type; this is probably a bug in the model. its type will be overridden
> llm_load_vocab: control-looking token: 32016 '<|fim▁begin|>' was not control-type; this is probably a bug in the model. its type will be overridden
> llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
> llm_load_vocab: special tokens cache size = 256
> llm_load_vocab: token to piece cache size = 0.1792 MB
> llm_load_print_meta: format = GGUF V3 (latest)
> llm_load_print_meta: arch = llama
> llm_load_print_meta: vocab type = BPE
> llm_load_print_meta: n_vocab = 32256
> llm_load_print_meta: n_merges = 31757
> llm_load_print_meta: vocab_only = 0
> llm_load_print_meta: n_ctx_train = 16384
> llm_load_print_meta: n_embd = 4096
> llm_load_print_meta: n_layer = 32
> llm_load_print_meta: n_head = 32
> llm_load_print_meta: n_head_kv = 32
> llm_load_print_meta: n_rot = 128
> llm_load_print_meta: n_swa = 0
> llm_load_print_meta: n_embd_head_k = 128
> llm_load_print_meta: n_embd_head_v = 128
> llm_load_print_meta: n_gqa = 1
> llm_load_print_meta: n_embd_k_gqa = 4096
> llm_load_print_meta: n_embd_v_gqa = 4096
> llm_load_print_meta: f_norm_eps = 0.0e+00
> llm_load_print_meta: f_norm_rms_eps = 1.0e-06
> llm_load_print_meta: f_clamp_kqv = 0.0e+00
> llm_load_print_meta: f_max_alibi_bias = 0.0e+00
> llm_load_print_meta: f_logit_scale = 0.0e+00
> llm_load_print_meta: n_ff = 11008
> llm_load_print_meta: n_expert = 0
> llm_load_print_meta: n_expert_used = 0
> llm_load_print_meta: causal attn = 1
> llm_load_print_meta: pooling type = 0
> llm_load_print_meta: rope type = 0
> llm_load_print_meta: rope scaling = linear
> llm_load_print_meta: freq_base_train = 100000.0
> llm_load_print_meta: freq_scale_train = 0.25
> llm_load_print_meta: n_ctx_orig_yarn = 16384
> llm_load_print_meta: rope_finetuned = unknown
> llm_load_print_meta: ssm_d_conv = 0
> llm_load_print_meta: ssm_d_inner = 0
> llm_load_print_meta: ssm_d_state = 0
> llm_load_print_meta: ssm_dt_rank = 0
> llm_load_print_meta: ssm_dt_b_c_rms = 0
> llm_load_print_meta: model type = 7B
> llm_load_print_meta: model ftype = Q4_1
> llm_load_print_meta: model params = 6.74 B
> llm_load_print_meta: model size = 3.95 GiB (5.03 BPW)
> llm_load_print_meta: general.name = deepseek-ai
> llm_load_print_meta: BOS token = 32013 '<|begin▁of▁sentence|>'
> llm_load_print_meta: EOS token = 32014 '<|end▁of▁sentence|>'
> llm_load_print_meta: EOT token = 32014 '<|end▁of▁sentence|>'
> llm_load_print_meta: PAD token = 32014 '<|end▁of▁sentence|>'
> llm_load_print_meta: LF token = 126 'Ä'
> llm_load_print_meta: FIM PRE token = 32016 '<|fim▁begin|>'
> llm_load_print_meta: FIM SUF token = 32015 '<|fim▁hole|>'
> llm_load_print_meta: FIM MID token = 32017 '<|fim▁end|>'
> llm_load_print_meta: EOG token = 32014 '<|end▁of▁sentence|>'
> llm_load_print_meta: max token length = 128
> time=2025-01-22T09:42:34.465+01:00 level=INFO source=server.go:589 msg="waiting for server to become available" status="llm server loading model"
> llm_load_tensors: CPU_Mapped model buffer size = 4043.12 MiB
> llama_new_context_with_model: n_seq_max = 1
> llama_new_context_with_model: n_ctx = 8096
> llama_new_context_with_model: n_ctx_per_seq = 8096
> llama_new_context_with_model: n_batch = 512
> llama_new_context_with_model: n_ubatch = 512
> llama_new_context_with_model: flash_attn = 0
> llama_new_context_with_model: freq_base = 100000.0
> llama_new_context_with_model: freq_scale = 0.25
> llama_new_context_with_model: n_ctx_per_seq (8096) < n_ctx_train (16384) -- the full capacity of the model will not be utilized
> llama_kv_cache_init: kv_size = 8096, offload = 1, type_k = 'f16', type_v = 'f16', n_layer = 32, can_shift = 1
> llama_kv_cache_init: CPU KV buffer size = 4048.00 MiB
> llama_new_context_with_model: KV self size = 4048.00 MiB, K (f16): 2024.00 MiB, V (f16): 2024.00 MiB
> llama_new_context_with_model: CPU output buffer size = 0.14 MiB
> llama_new_context_with_model: CPU compute buffer size = 553.82 MiB
> llama_new_context_with_model: graph nodes = 1030
> llama_new_context_with_model: graph splits = 1
> time=2025-01-22T09:42:38.977+01:00 level=INFO source=server.go:594 msg="llama runner started in 4.76 seconds"
> llama_model_loader: loaded meta data with 25 key-value pairs and 291 tensors from /home/nylle/.ollama/models/blobs/sha256-e04bbddd58d9290a89af21ef484ce1113ff34ef35e822e95e52ff1045bac17f5 (version GGUF V3 (latest))
> llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
> llama_model_loader: - kv 0: general.architecture str = llama
> llama_model_loader: - kv 1: general.name str = deepseek-ai
> llama_model_loader: - kv 2: llama.context_length u32 = 16384
> llama_model_loader: - kv 3: llama.embedding_length u32 = 4096
> llama_model_loader: - kv 4: llama.block_count u32 = 32
> llama_model_loader: - kv 5: llama.feed_forward_length u32 = 11008
> llama_model_loader: - kv 6: llama.rope.dimension_count u32 = 128
> llama_model_loader: - kv 7: llama.attention.head_count u32 = 32
> llama_model_loader: - kv 8: llama.attention.head_count_kv u32 = 32
> llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000001
> llama_model_loader: - kv 10: llama.rope.freq_base f32 = 100000.000000
> llama_model_loader: - kv 11: llama.rope.scaling.type str = linear
> llama_model_loader: - kv 12: llama.rope.scaling.factor f32 = 4.000000
> llama_model_loader: - kv 13: general.file_type u32 = 3
> llama_model_loader: - kv 14: tokenizer.ggml.model str = gpt2
> llama_model_loader: - kv 15: tokenizer.ggml.tokens arr[str,32256] = ["!", "\"", "#", "$", "%", "&", "'", ...
> llama_model_loader: - kv 16: tokenizer.ggml.scores arr[f32,32256] = [0.000000, 0.000000, 0.000000, 0.0000...
> llama_model_loader: - kv 17: tokenizer.ggml.token_type arr[i32,32256] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
> llama_model_loader: - kv 18: tokenizer.ggml.merges arr[str,31757] = ["Ġ Ġ", "Ġ t", "Ġ a", "i n", "h e...
> llama_model_loader: - kv 19: tokenizer.ggml.bos_token_id u32 = 32013
> llama_model_loader: - kv 20: tokenizer.ggml.eos_token_id u32 = 32014
> llama_model_loader: - kv 21: tokenizer.ggml.padding_token_id u32 = 32014
> llama_model_loader: - kv 22: tokenizer.ggml.add_bos_token bool = true
> llama_model_loader: - kv 23: tokenizer.ggml.add_eos_token bool = false
> llama_model_loader: - kv 24: general.quantization_version u32 = 2
> llama_model_loader: - type f32: 65 tensors
> llama_model_loader: - type q4_1: 225 tensors
> llama_model_loader: - type q6_K: 1 tensors
> llm_load_vocab: missing or unrecognized pre-tokenizer type, using: 'default'
> llm_load_vocab: control-looking token: 32015 '<|fim▁hole|>' was not control-type; this is probably a bug in the model. its type will be overridden
> llm_load_vocab: control-looking token: 32017 '<|fim▁end|>' was not control-type; this is probably a bug in the model. its type will be overridden
> llm_load_vocab: control-looking token: 32016 '<|fim▁begin|>' was not control-type; this is probably a bug in the model. its type will be overridden
> llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
> llm_load_vocab: special tokens cache size = 256
> llm_load_vocab: token to piece cache size = 0.1792 MB
> llm_load_print_meta: format = GGUF V3 (latest)
> llm_load_print_meta: arch = llama
> llm_load_print_meta: vocab type = BPE
> llm_load_print_meta: n_vocab = 32256
> llm_load_print_meta: n_merges = 31757
> llm_load_print_meta: vocab_only = 1
> llm_load_print_meta: model type = ?B
> llm_load_print_meta: model ftype = all F32
> llm_load_print_meta: model params = 6.74 B
> llm_load_print_meta: model size = 3.95 GiB (5.03 BPW)
> llm_load_print_meta: general.name = deepseek-ai
> llm_load_print_meta: BOS token = 32013 '<|begin▁of▁sentence|>'
> llm_load_print_meta: EOS token = 32014 '<|end▁of▁sentence|>'
> llm_load_print_meta: EOT token = 32014 '<|end▁of▁sentence|>'
> llm_load_print_meta: PAD token = 32014 '<|end▁of▁sentence|>'
> llm_load_print_meta: LF token = 126 'Ä'
> llm_load_print_meta: FIM PRE token = 32016 '<|fim▁begin|>'
> llm_load_print_meta: FIM SUF token = 32015 '<|fim▁hole|>'
> llm_load_print_meta: FIM MID token = 32017 '<|fim▁end|>'
> llm_load_print_meta: EOG token = 32014 '<|end▁of▁sentence|>'
> llm_load_print_meta: max token length = 128
> llama_model_load: vocab only - skipping tensors
> [GIN] 2025/01/22 - 09:43:28 | 200 | 54.425610491s | 127.0.0.1 | POST "/api/chat"
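One quick diagnostic (an illustrative sketch, not from the original report) is to query the running server's `/api/ps` endpoint, which is listed in the GIN routes above; its response includes per-model size fields that show whether any of the model was actually placed in VRAM.
```ts
// Sketch: check which models are loaded and where. GET /api/ps appears in
// the route table above; if size_vram is 0, the model is running on CPU.
const res = await fetch('http://127.0.0.1:11434/api/ps')
console.log(JSON.stringify(await res.json(), null, 2))
```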
### OS
Linux
### GPU
Nvidia
### CPU
AMD
### Ollama version
0.5.7 | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/8532/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/8532/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/4005 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/4005/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/4005/comments | https://api.github.com/repos/ollama/ollama/issues/4005/events | https://github.com/ollama/ollama/issues/4005 | 2,267,611,165 | I_kwDOJ0Z1Ps6HKQAd | 4,005 | curl: (7) Failed to connect to 172.16.105.65 port 11434 after 0 ms: Couldn't connect to server | {
"login": "moye12325",
"id": 43414308,
"node_id": "MDQ6VXNlcjQzNDE0MzA4",
"avatar_url": "https://avatars.githubusercontent.com/u/43414308?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/moye12325",
"html_url": "https://github.com/moye12325",
"followers_url": "https://api.github.com/users/moye12325/followers",
"following_url": "https://api.github.com/users/moye12325/following{/other_user}",
"gists_url": "https://api.github.com/users/moye12325/gists{/gist_id}",
"starred_url": "https://api.github.com/users/moye12325/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/moye12325/subscriptions",
"organizations_url": "https://api.github.com/users/moye12325/orgs",
"repos_url": "https://api.github.com/users/moye12325/repos",
"events_url": "https://api.github.com/users/moye12325/events{/privacy}",
"received_events_url": "https://api.github.com/users/moye12325/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | 5 | 2024-04-28T14:45:00 | 2025-01-30T07:28:12 | 2024-04-28T15:01:17 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
**I have set up listening and port settings**
![image](https://github.com/ollama/ollama/assets/43414308/5e7adbf3-e29b-4fc2-9bae-490a82064cbf)
**And I can use it on my server**
![image](https://github.com/ollama/ollama/assets/43414308/fde96efd-c4c4-4b04-8e17-ac6fdf8ea591)
![image](https://github.com/ollama/ollama/assets/43414308/066fda7b-7a60-4bd3-8e64-eee61b17ca28)
**My local computer can access my remote server, and port 3306 can also be accessed, but port 11434 is not working. I have also checked the server's firewall, and there is no problem there.**
![image](https://github.com/ollama/ollama/assets/43414308/f85c4053-1dd4-4244-9693-8b3519cabeb4)
## Could you help me, please?
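A minimal reachability check from the client machine could look like the sketch below (illustrative only, not part of the original report); `GET /api/tags` is an existing Ollama endpoint and should respond if the server is listening on 0.0.0.0:11434 and the port is reachable.
```ts
// Sketch: probe the remote Ollama server from the local machine.
// /api/tags is an existing endpoint; a thrown error here means the TCP
// connection itself failed (binding or firewall), not an Ollama problem.
try {
  const res = await fetch('http://172.16.105.65:11434/api/tags')
  console.log('reachable, status:', res.status)
} catch (err) {
  console.error('not reachable:', err)
}
```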
### OS
Linux, Windows
### GPU
Nvidia
### CPU
Intel
### Ollama version
0.1.32 | {
"login": "moye12325",
"id": 43414308,
"node_id": "MDQ6VXNlcjQzNDE0MzA4",
"avatar_url": "https://avatars.githubusercontent.com/u/43414308?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/moye12325",
"html_url": "https://github.com/moye12325",
"followers_url": "https://api.github.com/users/moye12325/followers",
"following_url": "https://api.github.com/users/moye12325/following{/other_user}",
"gists_url": "https://api.github.com/users/moye12325/gists{/gist_id}",
"starred_url": "https://api.github.com/users/moye12325/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/moye12325/subscriptions",
"organizations_url": "https://api.github.com/users/moye12325/orgs",
"repos_url": "https://api.github.com/users/moye12325/repos",
"events_url": "https://api.github.com/users/moye12325/events{/privacy}",
"received_events_url": "https://api.github.com/users/moye12325/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/4005/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/4005/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/4366 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/4366/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/4366/comments | https://api.github.com/repos/ollama/ollama/issues/4366/events | https://github.com/ollama/ollama/pull/4366 | 2,291,062,570 | PR_kwDOJ0Z1Ps5vKtbh | 4,366 | case sensitive filepaths | {
"login": "mxyng",
"id": 2372640,
"node_id": "MDQ6VXNlcjIzNzI2NDA=",
"avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mxyng",
"html_url": "https://github.com/mxyng",
"followers_url": "https://api.github.com/users/mxyng/followers",
"following_url": "https://api.github.com/users/mxyng/following{/other_user}",
"gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mxyng/subscriptions",
"organizations_url": "https://api.github.com/users/mxyng/orgs",
"repos_url": "https://api.github.com/users/mxyng/repos",
"events_url": "https://api.github.com/users/mxyng/events{/privacy}",
"received_events_url": "https://api.github.com/users/mxyng/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2024-05-11T20:51:28 | 2024-05-11T21:12:37 | 2024-05-11T21:12:37 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/4366",
"html_url": "https://github.com/ollama/ollama/pull/4366",
"diff_url": "https://github.com/ollama/ollama/pull/4366.diff",
"patch_url": "https://github.com/ollama/ollama/pull/4366.patch",
"merged_at": "2024-05-11T21:12:37"
} | TODO: filenames can be case-sensitive, but filepaths should not be. However, this needs to be backwards compatible; it currently is not, so fix the regression first.
resolves #4346 | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/4366/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/4366/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/908 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/908/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/908/comments | https://api.github.com/repos/ollama/ollama/issues/908/events | https://github.com/ollama/ollama/issues/908 | 1,962,610,243 | I_kwDOJ0Z1Ps50-w5D | 908 | Whether the chatglm2 model can be supported | {
"login": "ddv404",
"id": 97394404,
"node_id": "U_kgDOBc4e5A",
"avatar_url": "https://avatars.githubusercontent.com/u/97394404?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ddv404",
"html_url": "https://github.com/ddv404",
"followers_url": "https://api.github.com/users/ddv404/followers",
"following_url": "https://api.github.com/users/ddv404/following{/other_user}",
"gists_url": "https://api.github.com/users/ddv404/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ddv404/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ddv404/subscriptions",
"organizations_url": "https://api.github.com/users/ddv404/orgs",
"repos_url": "https://api.github.com/users/ddv404/repos",
"events_url": "https://api.github.com/users/ddv404/events{/privacy}",
"received_events_url": "https://api.github.com/users/ddv404/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2023-10-26T02:58:43 | 2023-10-26T03:12:05 | 2023-10-26T03:12:05 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | Could the chatglm2 model be supported?
| {
"login": "mchiang0610",
"id": 3325447,
"node_id": "MDQ6VXNlcjMzMjU0NDc=",
"avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mchiang0610",
"html_url": "https://github.com/mchiang0610",
"followers_url": "https://api.github.com/users/mchiang0610/followers",
"following_url": "https://api.github.com/users/mchiang0610/following{/other_user}",
"gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions",
"organizations_url": "https://api.github.com/users/mchiang0610/orgs",
"repos_url": "https://api.github.com/users/mchiang0610/repos",
"events_url": "https://api.github.com/users/mchiang0610/events{/privacy}",
"received_events_url": "https://api.github.com/users/mchiang0610/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/908/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/908/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/5417 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5417/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5417/comments | https://api.github.com/repos/ollama/ollama/issues/5417/events | https://github.com/ollama/ollama/issues/5417 | 2,384,659,698 | I_kwDOJ0Z1Ps6OIwTy | 5,417 | Cloudflare Tunnel + Vercel AI SDK = `[AI_JSONParseError]: JSON parsing failed` | {
"login": "KastanDay",
"id": 13607221,
"node_id": "MDQ6VXNlcjEzNjA3MjIx",
"avatar_url": "https://avatars.githubusercontent.com/u/13607221?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/KastanDay",
"html_url": "https://github.com/KastanDay",
"followers_url": "https://api.github.com/users/KastanDay/followers",
"following_url": "https://api.github.com/users/KastanDay/following{/other_user}",
"gists_url": "https://api.github.com/users/KastanDay/gists{/gist_id}",
"starred_url": "https://api.github.com/users/KastanDay/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/KastanDay/subscriptions",
"organizations_url": "https://api.github.com/users/KastanDay/orgs",
"repos_url": "https://api.github.com/users/KastanDay/repos",
"events_url": "https://api.github.com/users/KastanDay/events{/privacy}",
"received_events_url": "https://api.github.com/users/KastanDay/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | 1 | 2024-07-01T20:17:12 | 2024-07-24T22:29:24 | 2024-07-24T22:29:23 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
## Intro
Streaming from `Ollama -> Cloudflare Tunnel -> Vercel AI SDK` fails when parsing the stream: `[AI_JSONParseError]: JSON parsing failed`.
My working hypothesis is that Cloudflare Tunnel is not respecting the chunk boundaries of each message when streaming, causing the JSON parser to error out: it delivers partial or merged chunks rather than complete JSON lines (a line-buffering sketch follows the error message below).
My Cloudflare tunnels are configured [per the FAQ](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-can-i-use-ollama-with-cloudflare-tunnel), and seems to work fine when called via curl.
![CleanShot 2024-07-01 at 13 15 29](https://github.com/ollama/ollama/assets/13607221/e8dd36df-80f4-4bf7-abdb-bbd59e396919)
## Existing discussion
Migrated to `JsonStreamResponseHandler` but the issue persists:
https://github.com/sgomez/ollama-ai-provider/issues/7
## Reproducible code snippet
```ts
import { streamText } from 'ai'
import { createOllama } from 'ollama-ai-provider'
const ollama = createOllama({
baseURL: 'https://ollama.<redacted>.ai/api',
})
const result = await streamText({
maxTokens: 1024,
messages: [
{
content: 'Hello!',
role: 'user',
},
],
model: ollama('llama3:8b-instruct'),
system: 'You are a helpful chatbot.',
})
console.log("Right before ollama streaming...")
for await (const textPart of result.textStream) {
console.log('OLLAMA TEXT PART:', textPart)
}
return result
```
Full error message:
```
Right before ollama streaming...
[AI_JSONParseError: JSON parsing failed: Text: {"model":"llama3:8b","created_at":"2024-07-01T19:49:07.129596822Z","message":{"role":"assistant","content":"Hello"},"done":false}
{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.132765022Z","message":{"role":"assistant","content":" there"},"done":false}
{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.14696934Z","message":{"role":"assistant","content":"!"},"done":false}
{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.161141978Z","message":{"role":"assistant","content":" It"},"done":false}
{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.770545011Z","message":{"role":"assistant","content":")!"},"done":false}
{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.77983313Z","message":{"role":"assistant","content":""},"done_reason":"stop","done":true,"total_duration":853379439,"load_duration":1312920,"prompt_eval_count":24,"prompt_eval_duration":37965000,"eval_count":64,"eval_duration":661216000}
.
Error message: Unexpected token { in JSON at position 130] {
name: 'AI_JSONParseError',
cause: [SyntaxError: Unexpected token { in JSON at position 130],
text: '{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.129596822Z","message":{"role":"assistant","content":"Hello"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.132765022Z","message":{"role":"assistant","content":" there"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.14696934Z","message":{"role":"assistant","content":"!"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.161141978Z","message":{"role":"assistant","content":" It"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.175354656Z","message":{"role":"assistant","content":"\'s"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.189034134Z","message":{"role":"assistant","content":" nice"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.202178812Z","message":{"role":"assistant","content":" to"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.21526525Z","message":{"role":"assistant","content":" meet"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.228400448Z","message":{"role":"assistant","content":" you"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.241509166Z","message":{"role":"assistant","content":"!"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.254661925Z","message":{"role":"assistant","content":" I"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.267805883Z","message":{"role":"assistant","content":"\'m"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.280930561Z","message":{"role":"assistant","content":" here"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.294083639Z","message":{"role":"assistant","content":" to"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.307236877Z","message":{"role":"assistant","content":" help"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.320394635Z","message":{"role":"assistant","content":" with"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.333574393Z","message":{"role":"assistant","content":" any"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.346525871Z","message":{"role":"assistant","content":" questions"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.35950659Z","message":{"role":"assistant","content":" or"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.372517588Z","message":{"role":"assistant","content":" topics"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.385516426Z","message":{"role":"assistant","content":" you"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.394613585Z","message":{"role":"assistant","content":"\'d"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.403703423Z","message":{"role":"assistant","content":" like"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.412812182Z","message":{"role":"assistant","content":" to"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.421887661Z","message":{"role":"assistant","content":" discuss"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.431009899Z","message":{"role":"assistant","content":"."},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.440152898Z","message":{"role":"assistant","content":" 
What"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.449286417Z","message":{"role":"assistant","content":"\'s"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.458441016Z","message":{"role":"assistant","content":" on"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.467598614Z","message":{"role":"assistant","content":" your"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.476763813Z","message":{"role":"assistant","content":" mind"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.485901532Z","message":{"role":"assistant","content":" today"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.49505961Z","message":{"role":"assistant","content":"?"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.504220009Z","message":{"role":"assistant","content":" Do"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.513390688Z","message":{"role":"assistant","content":" you"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.522544327Z","message":{"role":"assistant","content":" have"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.531717245Z","message":{"role":"assistant","content":" a"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.540875164Z","message":{"role":"assistant","content":" specific"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.550055843Z","message":{"role":"assistant","content":" question"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.559263601Z","message":{"role":"assistant","content":","},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.56846796Z","message":{"role":"assistant","content":" or"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.577638959Z","message":{"role":"assistant","content":" would"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.586863157Z","message":{"role":"assistant","content":" you"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.596027116Z","message":{"role":"assistant","content":" like"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.605212155Z","message":{"role":"assistant","content":" some"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.614404713Z","message":{"role":"assistant","content":" recommendations"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.623591192Z","message":{"role":"assistant","content":" for"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.632772391Z","message":{"role":"assistant","content":" things"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.64195111Z","message":{"role":"assistant","content":" to"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.651143948Z","message":{"role":"assistant","content":" talk"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.660327467Z","message":{"role":"assistant","content":" about"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.669527986Z","message":{"role":"assistant","content":"?"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.678761904Z","message":{"role":"assistant","content":" 
I"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.687901863Z","message":{"role":"assistant","content":"\'m"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.697099262Z","message":{"role":"assistant","content":" all"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.70626678Z","message":{"role":"assistant","content":" ears"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.715456819Z","message":{"role":"assistant","content":" ("},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.724641818Z","message":{"role":"assistant","content":"or"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.733815857Z","message":{"role":"assistant","content":" rather"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.743009655Z","message":{"role":"assistant","content":","},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.752196454Z","message":{"role":"assistant","content":" all"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.761359213Z","message":{"role":"assistant","content":" text"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.770545011Z","message":{"role":"assistant","content":")!"},"done":false}\n{"model":"llama3:8b","created_at":"2024-07-01T19:49:07.77983313Z","message":{"role":"assistant","content":""},"done_reason":"stop","done":true,"total_duration":853379439,"load_duration":1312920,"prompt_eval_count":24,"prompt_eval_duration":37965000,"eval_count":64,"eval_duration":661216000}\n'
}
```
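A common mitigation for proxies that re-chunk NDJSON streams (an illustrative sketch, assuming the hypothesis above is correct, and not part of the original report) is to line-buffer before parsing: split the accumulated text on newlines, parse only complete lines, and carry the trailing partial line into the next chunk.
```ts
// Sketch: line-buffered NDJSON parsing that tolerates merged/split chunks.
async function* parseNdjsonLines(stream: ReadableStream<Uint8Array>) {
  const decoder = new TextDecoder()
  const reader = stream.getReader()
  let buffer = ''
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })
    const lines = buffer.split('\n')
    buffer = lines.pop() ?? '' // keep the incomplete tail for the next chunk
    for (const line of lines) {
      if (line.trim()) yield JSON.parse(line)
    }
  }
  if (buffer.trim()) yield JSON.parse(buffer) // flush any final line
}
```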
## Environment
* Ollama version `0.1.46`
* npm view ollama-ai-provider version --> 0.9.1
* npm view ai version --> 3.2.16
* Next.js Pages router
### OS
Linux, Docker
### GPU
Nvidia
### CPU
AMD
### Ollama version
0.1.46 | {
"login": "KastanDay",
"id": 13607221,
"node_id": "MDQ6VXNlcjEzNjA3MjIx",
"avatar_url": "https://avatars.githubusercontent.com/u/13607221?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/KastanDay",
"html_url": "https://github.com/KastanDay",
"followers_url": "https://api.github.com/users/KastanDay/followers",
"following_url": "https://api.github.com/users/KastanDay/following{/other_user}",
"gists_url": "https://api.github.com/users/KastanDay/gists{/gist_id}",
"starred_url": "https://api.github.com/users/KastanDay/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/KastanDay/subscriptions",
"organizations_url": "https://api.github.com/users/KastanDay/orgs",
"repos_url": "https://api.github.com/users/KastanDay/repos",
"events_url": "https://api.github.com/users/KastanDay/events{/privacy}",
"received_events_url": "https://api.github.com/users/KastanDay/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5417/reactions",
"total_count": 2,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 2
} | https://api.github.com/repos/ollama/ollama/issues/5417/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/540 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/540/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/540/comments | https://api.github.com/repos/ollama/ollama/issues/540/events | https://github.com/ollama/ollama/pull/540 | 1,899,497,724 | PR_kwDOJ0Z1Ps5afvdV | 540 | Allow setting ollama home directory through environment var OLLAMA_HOME. | {
"login": "JayNakrani",
"id": 6269279,
"node_id": "MDQ6VXNlcjYyNjkyNzk=",
"avatar_url": "https://avatars.githubusercontent.com/u/6269279?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/JayNakrani",
"html_url": "https://github.com/JayNakrani",
"followers_url": "https://api.github.com/users/JayNakrani/followers",
"following_url": "https://api.github.com/users/JayNakrani/following{/other_user}",
"gists_url": "https://api.github.com/users/JayNakrani/gists{/gist_id}",
"starred_url": "https://api.github.com/users/JayNakrani/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/JayNakrani/subscriptions",
"organizations_url": "https://api.github.com/users/JayNakrani/orgs",
"repos_url": "https://api.github.com/users/JayNakrani/repos",
"events_url": "https://api.github.com/users/JayNakrani/events{/privacy}",
"received_events_url": "https://api.github.com/users/JayNakrani/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 3 | 2023-09-16T16:47:56 | 2023-10-25T22:39:15 | 2023-10-25T22:34:41 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/540",
"html_url": "https://github.com/ollama/ollama/pull/540",
"diff_url": "https://github.com/ollama/ollama/pull/540.diff",
"patch_url": "https://github.com/ollama/ollama/pull/540.patch",
"merged_at": null
} | It would be great to be able to specify a different directory location than the default user-home directory. | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/540/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/540/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/1821 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/1821/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/1821/comments | https://api.github.com/repos/ollama/ollama/issues/1821/events | https://github.com/ollama/ollama/issues/1821 | 2,068,522,984 | I_kwDOJ0Z1Ps57Syfo | 1,821 | amd64 binary for version 0.1.18 won't work with rocm-6.0.0 | {
"login": "chirvo",
"id": 1088243,
"node_id": "MDQ6VXNlcjEwODgyNDM=",
"avatar_url": "https://avatars.githubusercontent.com/u/1088243?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/chirvo",
"html_url": "https://github.com/chirvo",
"followers_url": "https://api.github.com/users/chirvo/followers",
"following_url": "https://api.github.com/users/chirvo/following{/other_user}",
"gists_url": "https://api.github.com/users/chirvo/gists{/gist_id}",
"starred_url": "https://api.github.com/users/chirvo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chirvo/subscriptions",
"organizations_url": "https://api.github.com/users/chirvo/orgs",
"repos_url": "https://api.github.com/users/chirvo/repos",
"events_url": "https://api.github.com/users/chirvo/events{/privacy}",
"received_events_url": "https://api.github.com/users/chirvo/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 3 | 2024-01-06T10:27:48 | 2024-01-11T22:00:49 | 2024-01-11T22:00:49 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null |
This happens when using the Linux binary downloaded from [the web page](https://ollama.ai/download/ollama-linux-amd64).
```
2024/01/06 09:03:56 images.go:834: total blobs: 0
2024/01/06 09:03:56 images.go:841: total unused blobs removed: 0
2024/01/06 09:03:56 routes.go:929: Listening on 127.0.0.1:11434 (version 0.1.18)
2024/01/06 09:03:56 shim_ext_server.go:142: Dynamic LLM variants [cuda rocm]
2024/01/06 09:03:56 gpu.go:34: Detecting GPU type
2024/01/06 09:03:56 gpu.go:39: CUDA not detected: Unable to load libnvidia-ml.so library to query for Nvidia GPUs: /usr/lib/wsl/lib/libnvidia-ml.so.1: cannot open shared object file: No such file or directory
2024/01/06 09:03:56 gpu.go:48: Radeon GPU detected
...
ollama | 2024/01/06 09:06:14 llm.go:90: Failed to load dynamic library rocm - falling back to CPU mode Unable to load dynamic library: Unable to load dynamic server library: libhipblas.so.1: cannot open shared object file: No such file or directory
ollama | 2024/01/06 09:06:14 gpu.go:146: 22476 MB VRAM available, loading up to 609 rocm GPU layers out of 22
```
The binary falls back to CPU mode because it can't load three libraries:
- libhipblas.so.1
- librocblas.so.3
- librocsparse.so.0
**Workaround:**
Just create the pertinent symbolic links for each library:
```bash
cd /opt/rocm-6.0.0/lib
ln -sf libhipblas.so.2.0.60000 libhipblas.so.1
ln -sf librocblas.so.4.0.60000 librocblas.so.3
ln -sf librocsparse.so.1.0.0.60000 librocsparse.so.0
```
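To double-check that the links resolve before restarting the server (paths are from the rocm-6.0.0 install above):
```bash
ls -l /opt/rocm-6.0.0/lib/libhipblas.so.1 \
      /opt/rocm-6.0.0/lib/librocblas.so.3 \
      /opt/rocm-6.0.0/lib/librocsparse.so.0
```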
After linking the libraries, everything should work.
```
2024/01/06 09:56:20 images.go:834: total blobs: 5
2024/01/06 09:56:20 images.go:841: total unused blobs removed: 0
2024/01/06 09:56:20 routes.go:929: Listening on 127.0.0.1:11434 (version 0.1.18)
2024/01/06 09:56:20 shim_ext_server.go:142: Dynamic LLM variants [cuda rocm]
2024/01/06 09:56:20 gpu.go:34: Detecting GPU type
2024/01/06 09:56:20 gpu.go:39: CUDA not detected: Unable to load libnvidia-ml.so library to query for Nvidia GPUs: /usr/lib/wsl/lib/libnvidia-ml.so.1: cannot open shared object file: No such file or directory
2024/01/06 09:56:20 gpu.go:48: Radeon GPU detected
[GIN] 2024/01/06 - 09:56:35 | 200 | 31.67µs | 127.0.0.1 | HEAD "/"
[GIN] 2024/01/06 - 09:56:35 | 200 | 402.531µs | 127.0.0.1 | POST "/api/show"
[GIN] 2024/01/06 - 09:56:35 | 200 | 198.656µs | 127.0.0.1 | POST "/api/show"
2024/01/06 09:56:36 shim_ext_server_linux.go:24: Updating PATH to /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/tmp/ollama4208096896/rocm
2024/01/06 09:56:36 shim_ext_server.go:92: Loading Dynamic Shim llm server: /tmp/ollama4208096896/rocm/libext_server.so
2024/01/06 09:56:36 gpu.go:146: 22522 MB VRAM available, loading up to 610 rocm GPU layers out of 22
2024/01/06 09:56:36 ext_server_common.go:143: Initializing internal llama server
ggml_init_cublas: GGML_CUDA_FORCE_MMQ: no
ggml_init_cublas: CUDA_USE_TENSOR_CORES: yes
ggml_init_cublas: found 1 ROCm devices:
Device 0: Radeon RX 7900 XTX, compute capability 11.0
```
Just wanted to document this in case somebody else has the same issue.
Cheers, and thank you all for the amazing work you're doing with ollama. | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/1821/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/1821/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/5757 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5757/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5757/comments | https://api.github.com/repos/ollama/ollama/issues/5757/events | https://github.com/ollama/ollama/pull/5757 | 2,414,669,823 | PR_kwDOJ0Z1Ps51spWK | 5,757 | bump go version to 1.22.5 to fix security vulnerabilities in docker | {
"login": "lreed-mdsol",
"id": 72270603,
"node_id": "MDQ6VXNlcjcyMjcwNjAz",
"avatar_url": "https://avatars.githubusercontent.com/u/72270603?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lreed-mdsol",
"html_url": "https://github.com/lreed-mdsol",
"followers_url": "https://api.github.com/users/lreed-mdsol/followers",
"following_url": "https://api.github.com/users/lreed-mdsol/following{/other_user}",
"gists_url": "https://api.github.com/users/lreed-mdsol/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lreed-mdsol/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lreed-mdsol/subscriptions",
"organizations_url": "https://api.github.com/users/lreed-mdsol/orgs",
"repos_url": "https://api.github.com/users/lreed-mdsol/repos",
"events_url": "https://api.github.com/users/lreed-mdsol/events{/privacy}",
"received_events_url": "https://api.github.com/users/lreed-mdsol/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2024-07-17T21:59:06 | 2024-07-22T23:32:43 | 2024-07-22T23:32:43 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/5757",
"html_url": "https://github.com/ollama/ollama/pull/5757",
"diff_url": "https://github.com/ollama/ollama/pull/5757.diff",
"patch_url": "https://github.com/ollama/ollama/pull/5757.patch",
"merged_at": "2024-07-22T23:32:43"
} | The existing Go version, 1.22.1, shows security vulnerabilities when scanned by Prisma.
Scan results for: image ollama/ollama:latest sha256:56505af4d7ed5e66de96c124c21312aee6cdd518098efd0fa524738f24b1a701
Vulnerabilities:
| CVE | SEVERITY | CVSS | PACKAGE | VERSION | STATUS | PUBLISHED | DISCOVERED | DESCRIPTION |
|---|---|---|---|---|---|---|---|---|
| CVE-2024-24790 | critical | 9.80 | net/netip | 1.22.1 | fixed in 1.21.11, 1.22.4 | 42 days ago | < 1 hour | The various Is methods (IsPrivate, IsLoopback, ...) |
This minor update to Go 1.22.5 fixes the critical CVE-2024-24790 and also corrects the medium-severity CVE-2024-24791.
I built and tested the Docker image locally.
Scan results for: image ollama_orig_1_22_5:latest sha256:5b7f98e681c9a7b807d02beecc2eb303a5303a6cd248dcf448ae360e79b759ab
Vulnerabilities found for image ollama_orig_1_22_5:latest: total - 16, critical - 0, high - 0, medium - 4, low - 12
It would be great to get these fixes in ASAP.
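For reference, the change itself is just bumping the pinned Go toolchain in the Dockerfile, along these lines (the exact ARG name in the repo may differ):
```diff
-ARG GOLANG_VERSION=1.22.1
+ARG GOLANG_VERSION=1.22.5
```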
| {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5757/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/5757/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/5304 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5304/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5304/comments | https://api.github.com/repos/ollama/ollama/issues/5304/events | https://github.com/ollama/ollama/issues/5304 | 2,375,630,707 | I_kwDOJ0Z1Ps6NmT9z | 5,304 | Support for multimodal embedding models | {
"login": "k0marov",
"id": 95040709,
"node_id": "U_kgDOBao0xQ",
"avatar_url": "https://avatars.githubusercontent.com/u/95040709?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/k0marov",
"html_url": "https://github.com/k0marov",
"followers_url": "https://api.github.com/users/k0marov/followers",
"following_url": "https://api.github.com/users/k0marov/following{/other_user}",
"gists_url": "https://api.github.com/users/k0marov/gists{/gist_id}",
"starred_url": "https://api.github.com/users/k0marov/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/k0marov/subscriptions",
"organizations_url": "https://api.github.com/users/k0marov/orgs",
"repos_url": "https://api.github.com/users/k0marov/repos",
"events_url": "https://api.github.com/users/k0marov/events{/privacy}",
"received_events_url": "https://api.github.com/users/k0marov/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396200,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request",
"name": "feature request",
"color": "a2eeef",
"default": false,
"description": "New feature or request"
}
] | open | false | null | [] | null | 3 | 2024-06-26T15:13:57 | 2024-11-12T19:04:22 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | Hi! It seems I'm unable to find a REST API endpoint for generating embeddings from an image; in other words, support for models like CLIP, which can take both text and images as input.
But these models are very useful in many applications, such as semantic image search, classification, etc.
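For illustration, this is the request shape I would imagine, modeled on the existing `/api/embeddings` endpoint; the `images` field and the `clip` model name are my inventions here, not real parameters:
```bash
curl http://localhost:11434/api/embeddings -d '{
  "model": "clip",
  "prompt": "a photo of a cat",
  "images": ["<base64-encoded image data>"]
}'
```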
Note: I'll be glad to contribute by implementing support for this. | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5304/reactions",
"total_count": 28,
"+1": 28,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/5304/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/197 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/197/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/197/comments | https://api.github.com/repos/ollama/ollama/issues/197/events | https://github.com/ollama/ollama/pull/197 | 1,818,958,365 | PR_kwDOJ0Z1Ps5WQqRx | 197 | remove file on digest mismatch | {
"login": "BruceMacD",
"id": 5853428,
"node_id": "MDQ6VXNlcjU4NTM0Mjg=",
"avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/BruceMacD",
"html_url": "https://github.com/BruceMacD",
"followers_url": "https://api.github.com/users/BruceMacD/followers",
"following_url": "https://api.github.com/users/BruceMacD/following{/other_user}",
"gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}",
"starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions",
"organizations_url": "https://api.github.com/users/BruceMacD/orgs",
"repos_url": "https://api.github.com/users/BruceMacD/repos",
"events_url": "https://api.github.com/users/BruceMacD/events{/privacy}",
"received_events_url": "https://api.github.com/users/BruceMacD/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2023-07-24T18:54:33 | 2023-09-08T15:13:26 | 2023-07-24T19:59:12 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/197",
"html_url": "https://github.com/ollama/ollama/pull/197",
"diff_url": "https://github.com/ollama/ollama/pull/197.diff",
"patch_url": "https://github.com/ollama/ollama/pull/197.patch",
"merged_at": "2023-07-24T19:59:12"
} | Ideally this never happens (the download resume should prevent it), but if there is a digest mismatch, the specific blob should be removed automatically rather than requiring the user to remove it manually.
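A rough sketch of the idea (a simplified illustration, not the actual server code; the path and digest in `main` are placeholders):
```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

// verifyBlob hashes the downloaded file and deletes it when the digest
// doesn't match, so the next pull starts from a clean slate.
func verifyBlob(path, expected string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return err
	}
	if got := fmt.Sprintf("sha256:%x", h.Sum(nil)); got != expected {
		os.Remove(path) // drop the corrupt blob instead of leaving it behind
		return fmt.Errorf("digest mismatch: got %s, want %s", got, expected)
	}
	return nil
}

func main() {
	// placeholder arguments, for illustration only
	if err := verifyBlob("/tmp/blob", "sha256:..."); err != nil {
		fmt.Println(err)
	}
}
```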
related to #170 | {
"login": "BruceMacD",
"id": 5853428,
"node_id": "MDQ6VXNlcjU4NTM0Mjg=",
"avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/BruceMacD",
"html_url": "https://github.com/BruceMacD",
"followers_url": "https://api.github.com/users/BruceMacD/followers",
"following_url": "https://api.github.com/users/BruceMacD/following{/other_user}",
"gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}",
"starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions",
"organizations_url": "https://api.github.com/users/BruceMacD/orgs",
"repos_url": "https://api.github.com/users/BruceMacD/repos",
"events_url": "https://api.github.com/users/BruceMacD/events{/privacy}",
"received_events_url": "https://api.github.com/users/BruceMacD/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/197/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/197/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/2637 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/2637/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/2637/comments | https://api.github.com/repos/ollama/ollama/issues/2637/events | https://github.com/ollama/ollama/issues/2637 | 2,146,959,786 | I_kwDOJ0Z1Ps5_-AGq | 2,637 | Integrated AMD GPU support | {
"login": "DocMAX",
"id": 5351323,
"node_id": "MDQ6VXNlcjUzNTEzMjM=",
"avatar_url": "https://avatars.githubusercontent.com/u/5351323?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/DocMAX",
"html_url": "https://github.com/DocMAX",
"followers_url": "https://api.github.com/users/DocMAX/followers",
"following_url": "https://api.github.com/users/DocMAX/following{/other_user}",
"gists_url": "https://api.github.com/users/DocMAX/gists{/gist_id}",
"starred_url": "https://api.github.com/users/DocMAX/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/DocMAX/subscriptions",
"organizations_url": "https://api.github.com/users/DocMAX/orgs",
"repos_url": "https://api.github.com/users/DocMAX/repos",
"events_url": "https://api.github.com/users/DocMAX/events{/privacy}",
"received_events_url": "https://api.github.com/users/DocMAX/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396200,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request",
"name": "feature request",
"color": "a2eeef",
"default": false,
"description": "New feature or request"
},
{
"id": 6433346500,
"node_id": "LA_kwDOJ0Z1Ps8AAAABf3UTxA",
"url": "https://api.github.com/repos/ollama/ollama/labels/amd",
"name": "amd",
"color": "000000",
"default": false,
"description": "Issues relating to AMD GPUs and ROCm"
}
] | open | false | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 144 | 2024-02-21T14:56:12 | 2025-01-07T09:01:33 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | Opening a new issue (see https://github.com/ollama/ollama/pull/2195) to track support for integrated GPUs. I have an AMD 5800U CPU with integrated graphics. As far as I can tell from my research, recent ROCR releases do support integrated graphics as well.
Currently Ollama seems to ignore iGPUs in general. | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/2637/reactions",
"total_count": 32,
"+1": 31,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 1
} | https://api.github.com/repos/ollama/ollama/issues/2637/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/5250 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5250/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5250/comments | https://api.github.com/repos/ollama/ollama/issues/5250/events | https://github.com/ollama/ollama/issues/5250 | 2,369,600,313 | I_kwDOJ0Z1Ps6NPTs5 | 5,250 | best performence with which gpu or cpu? for notebook | {
"login": "olumolu",
"id": 162728301,
"node_id": "U_kgDOCbMJbQ",
"avatar_url": "https://avatars.githubusercontent.com/u/162728301?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/olumolu",
"html_url": "https://github.com/olumolu",
"followers_url": "https://api.github.com/users/olumolu/followers",
"following_url": "https://api.github.com/users/olumolu/following{/other_user}",
"gists_url": "https://api.github.com/users/olumolu/gists{/gist_id}",
"starred_url": "https://api.github.com/users/olumolu/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/olumolu/subscriptions",
"organizations_url": "https://api.github.com/users/olumolu/orgs",
"repos_url": "https://api.github.com/users/olumolu/repos",
"events_url": "https://api.github.com/users/olumolu/events{/privacy}",
"received_events_url": "https://api.github.com/users/olumolu/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396200,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request",
"name": "feature request",
"color": "a2eeef",
"default": false,
"description": "New feature or request"
}
] | closed | false | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 1 | 2024-06-24T08:35:24 | 2024-06-25T16:22:11 | 2024-06-25T16:22:07 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | I could not figure out the best-performing configuration with Ollama. I tried running the Ollama Docker image with the ROCm AMD GPU backend:
`docker run -d --device /dev/kfd --device /dev/dri -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama:rocm`
and also in CPU-only mode:
`docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama`
I have two systems: one laptop with an AMD 5500U, and one with an AMD 7540U.
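One way to compare the two setups is to time generations with the verbose flag, which prints prompt eval and eval rates in tokens/s (the model name here is just a placeholder):
```bash
ollama run llama3 --verbose
# after each response, ollama prints total duration, prompt eval rate and eval rate
```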
What should I use: CPU-only, or the GPU? These two systems are notebooks and do not have a dedicated GPU. | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5250/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/5250/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/8495 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/8495/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/8495/comments | https://api.github.com/repos/ollama/ollama/issues/8495/events | https://github.com/ollama/ollama/issues/8495 | 2,798,176,005 | I_kwDOJ0Z1Ps6myMcF | 8,495 | Why do I keep getting "@@@@" as responses? | {
"login": "Jetbuzz",
"id": 53119016,
"node_id": "MDQ6VXNlcjUzMTE5MDE2",
"avatar_url": "https://avatars.githubusercontent.com/u/53119016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Jetbuzz",
"html_url": "https://github.com/Jetbuzz",
"followers_url": "https://api.github.com/users/Jetbuzz/followers",
"following_url": "https://api.github.com/users/Jetbuzz/following{/other_user}",
"gists_url": "https://api.github.com/users/Jetbuzz/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Jetbuzz/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Jetbuzz/subscriptions",
"organizations_url": "https://api.github.com/users/Jetbuzz/orgs",
"repos_url": "https://api.github.com/users/Jetbuzz/repos",
"events_url": "https://api.github.com/users/Jetbuzz/events{/privacy}",
"received_events_url": "https://api.github.com/users/Jetbuzz/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | open | false | null | [] | null | 6 | 2025-01-20T04:47:09 | 2025-01-24T15:01:40 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
I have attached a screenshot of what is happening. I have an Nvidia 980M with 4 GB of VRAM, running the latest versions of Windows 10 and Ollama.
![Image](https://github.com/user-attachments/assets/e670af15-7220-4a98-ab74-9cc398ffb52e)
### OS
Windows
### GPU
Nvidia
### CPU
Intel
### Ollama version
0.5.7 | null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/8495/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/8495/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/3114 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/3114/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/3114/comments | https://api.github.com/repos/ollama/ollama/issues/3114/events | https://github.com/ollama/ollama/issues/3114 | 2,184,347,419 | I_kwDOJ0Z1Ps6CMn8b | 3,114 | Using INT4 Quantization to Save VRAM with ollama | {
"login": "TraceRecursion",
"id": 66545369,
"node_id": "MDQ6VXNlcjY2NTQ1MzY5",
"avatar_url": "https://avatars.githubusercontent.com/u/66545369?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/TraceRecursion",
"html_url": "https://github.com/TraceRecursion",
"followers_url": "https://api.github.com/users/TraceRecursion/followers",
"following_url": "https://api.github.com/users/TraceRecursion/following{/other_user}",
"gists_url": "https://api.github.com/users/TraceRecursion/gists{/gist_id}",
"starred_url": "https://api.github.com/users/TraceRecursion/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/TraceRecursion/subscriptions",
"organizations_url": "https://api.github.com/users/TraceRecursion/orgs",
"repos_url": "https://api.github.com/users/TraceRecursion/repos",
"events_url": "https://api.github.com/users/TraceRecursion/events{/privacy}",
"received_events_url": "https://api.github.com/users/TraceRecursion/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 3 | 2024-03-13T15:50:06 | 2024-03-14T15:05:39 | 2024-03-14T15:05:39 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | Hello ollama team,
I am currently exploring the use of ollama to run models and am interested in implementing INT4 quantization to save on VRAM usage. I have read through the documentation but would appreciate some guidance on how to properly apply INT4 quantization during the model run.
Could you provide some insights or examples on how to integrate this quantization method with ollama? Specifically, I am looking for ways to reduce the VRAM footprint while maintaining performance.
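For concreteness, this is the kind of workflow I have in mind, using the `ollama/quantize` helper image I have seen referenced elsewhere; whether `q4_0` is the right target for INT4 here, and the output file name, are assumptions on my part:
```bash
# quantize locally cloned weights to 4-bit (q4_0 assumed to correspond to INT4)
docker run --rm -v .:/model ollama/quantize -q q4_0 /model

# then point a Modelfile at the quantized weights (file name assumed)
echo 'FROM ./model-q4_0.bin' > Modelfile
ollama create mymodel -f Modelfile
ollama run mymodel
```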
Thank you for your assistance.
Best regards,
溯源递归 | {
"login": "TraceRecursion",
"id": 66545369,
"node_id": "MDQ6VXNlcjY2NTQ1MzY5",
"avatar_url": "https://avatars.githubusercontent.com/u/66545369?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/TraceRecursion",
"html_url": "https://github.com/TraceRecursion",
"followers_url": "https://api.github.com/users/TraceRecursion/followers",
"following_url": "https://api.github.com/users/TraceRecursion/following{/other_user}",
"gists_url": "https://api.github.com/users/TraceRecursion/gists{/gist_id}",
"starred_url": "https://api.github.com/users/TraceRecursion/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/TraceRecursion/subscriptions",
"organizations_url": "https://api.github.com/users/TraceRecursion/orgs",
"repos_url": "https://api.github.com/users/TraceRecursion/repos",
"events_url": "https://api.github.com/users/TraceRecursion/events{/privacy}",
"received_events_url": "https://api.github.com/users/TraceRecursion/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/3114/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/3114/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/4797 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/4797/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/4797/comments | https://api.github.com/repos/ollama/ollama/issues/4797/events | https://github.com/ollama/ollama/issues/4797 | 2,331,212,648 | I_kwDOJ0Z1Ps6K83to | 4,797 | Stop token behavior changes when specifying list of stop tokens | {
"login": "ccreutzi",
"id": 89011131,
"node_id": "MDQ6VXNlcjg5MDExMTMx",
"avatar_url": "https://avatars.githubusercontent.com/u/89011131?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ccreutzi",
"html_url": "https://github.com/ccreutzi",
"followers_url": "https://api.github.com/users/ccreutzi/followers",
"following_url": "https://api.github.com/users/ccreutzi/following{/other_user}",
"gists_url": "https://api.github.com/users/ccreutzi/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ccreutzi/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ccreutzi/subscriptions",
"organizations_url": "https://api.github.com/users/ccreutzi/orgs",
"repos_url": "https://api.github.com/users/ccreutzi/repos",
"events_url": "https://api.github.com/users/ccreutzi/events{/privacy}",
"received_events_url": "https://api.github.com/users/ccreutzi/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
},
{
"id": 7706482389,
"node_id": "LA_kwDOJ0Z1Ps8AAAABy1eW1Q",
"url": "https://api.github.com/repos/ollama/ollama/labels/api",
"name": "api",
"color": "bfdadc",
"default": false,
"description": ""
}
] | closed | false | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 1 | 2024-06-03T13:48:43 | 2024-11-12T01:35:36 | 2024-11-12T01:35:35 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
Setting a stop token of `"k=1 was"` for this call has no effect, as expected:
```
$ curl -s http://localhost:11434/api/chat -d '{
> "model": "mistral","options": {"top_k":1,"stop":["k=1 was"]},
> "stream": false,
> "messages":[{"role":"user","content":"Top-k sampling with k=1 returns a definite answer."}]
> }' | jq .message.content
" Yes, that's correct! Top-k sampling with k=1 is also known as \"taking the top element\" or \"greedy selection.\" It simply selects the highest value (or any other sortable attribute) from a dataset without replacement. Since it only returns one item, it guarantees a definite answer. This method is often used in various machine learning and data analysis applications where you want to focus on the most significant or representative element of your dataset."
```
Setting a stop token of `"1"` also has the expected effect of ending after `k=` in the (constant) output:
```
$ curl -s http://localhost:11434/api/chat -d '{
> "model": "mistral","options": {"top_k":1,"stop":["1"]},
> "stream": false,
> "messages":[{"role":"user","content":"Top-k sampling with k=1 returns a definite answer."}]
> }' | jq .message.content
" Yes, that's correct! Top-k sampling with k="
```
But combining the two results in the generated text ending before the `k=` tokens:
```
$ curl -s http://localhost:11434/api/chat -d '{
> "model": "mistral","options": {"top_k":1,"stop":["k=1 was","1"]},
> "stream": false,
> "messages":[{"role":"user","content":"Top-k sampling with k=1 returns a definite answer."}]
> }' | jq .message.content
" Yes, that's correct! Top-k sampling with"
```
### OS
macOS
### GPU
Apple
### CPU
Apple
### Ollama version
0.1.38, installed with Homebrew | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/4797/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/4797/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/3825 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/3825/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/3825/comments | https://api.github.com/repos/ollama/ollama/issues/3825/events | https://github.com/ollama/ollama/issues/3825 | 2,256,736,953 | I_kwDOJ0Z1Ps6GgxK5 | 3,825 | Updating to docker 0.1.29-rocm and beyond breaks detection of GPU (Radeon Pro W6600) | {
"login": "ic4-y",
"id": 61844926,
"node_id": "MDQ6VXNlcjYxODQ0OTI2",
"avatar_url": "https://avatars.githubusercontent.com/u/61844926?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ic4-y",
"html_url": "https://github.com/ic4-y",
"followers_url": "https://api.github.com/users/ic4-y/followers",
"following_url": "https://api.github.com/users/ic4-y/following{/other_user}",
"gists_url": "https://api.github.com/users/ic4-y/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ic4-y/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ic4-y/subscriptions",
"organizations_url": "https://api.github.com/users/ic4-y/orgs",
"repos_url": "https://api.github.com/users/ic4-y/repos",
"events_url": "https://api.github.com/users/ic4-y/events{/privacy}",
"received_events_url": "https://api.github.com/users/ic4-y/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396184,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA",
"url": "https://api.github.com/repos/ollama/ollama/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
},
{
"id": 6433346500,
"node_id": "LA_kwDOJ0Z1Ps8AAAABf3UTxA",
"url": "https://api.github.com/repos/ollama/ollama/labels/amd",
"name": "amd",
"color": "000000",
"default": false,
"description": "Issues relating to AMD GPUs and ROCm"
}
] | closed | false | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
}
] | null | 19 | 2024-04-22T14:47:12 | 2024-05-04T21:20:20 | 2024-05-04T21:20:19 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | ### What is the issue?
When updating my Docker stack from the image `0.1.24-rocm` to newer versions, in order to run some embedding models that crashed otherwise, I noticed that `0.1.29-rocm` and above break GPU detection on my Radeon Pro W6600. The GPU works fine in `0.1.28-rocm`.
On `0.1.32-rocm` I get the following error when trying to start generation:
```
ollama-rocm | rocBLAS error: Could not initialize Tensile host: No devices found
ollama-rocm | time=2024-04-22T14:33:01.535Z level=ERROR source=routes.go:120 msg="error loading llama server" error="llama runner process no longer running: -1 error:Could not initialize Tensile host: No devices found"
ollama-rocm | time=2024-04-22T14:33:01.535Z level=DEBUG source=server.go:832 msg="stopping llama server"
```
While on `0.1.28-rocm` it works just fine:
```
ollama-rocm | [1713796808] Performing pre-initialization of GPU
ollama-rocm | ggml_init_cublas: GGML_CUDA_FORCE_MMQ: no
ollama-rocm | ggml_init_cublas: CUDA_USE_TENSOR_CORES: yes
ollama-rocm | ggml_init_cublas: found 1 ROCm devices:
ollama-rocm | Device 0: AMD Radeon PRO W6600, compute capability 10.3, VMM: no
```
I am wondering if this is a duplicate of https://github.com/ollama/ollama/issues/3304 or maybe related. The problems started with `0.1.29-rocm` in my case as well.
I am running this ollama docker container in an LXC container on a Proxmox host with Dual Xeon v4 CPUs.
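For anyone reproducing: a typical way to pass the GPU through to the container, with the tag set to the version under test (flags mirror the standard ROCm setup):
```bash
docker run -d --device /dev/kfd --device /dev/dri \
  -v ollama:/root/.ollama -p 11434:11434 \
  --name ollama ollama/ollama:0.1.32-rocm
```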
### OS
Docker in Linux LXC container running on Proxmox 8.1
### GPU
AMD Radeon Pro W6600
### CPU
Intel Xeon v4
### Ollama version
0.1.29 and above | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/3825/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/3825/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/6264 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/6264/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/6264/comments | https://api.github.com/repos/ollama/ollama/issues/6264/events | https://github.com/ollama/ollama/pull/6264 | 2,456,695,353 | PR_kwDOJ0Z1Ps534qxf | 6,264 | Parse cpuinfo and set default threads | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2024-08-08T21:51:23 | 2024-10-15T18:36:11 | 2024-10-15T18:36:08 | COLLABORATOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/6264",
"html_url": "https://github.com/ollama/ollama/pull/6264",
"diff_url": "https://github.com/ollama/ollama/pull/6264.diff",
"patch_url": "https://github.com/ollama/ollama/pull/6264.patch",
"merged_at": "2024-10-15T18:36:08"
} | Set the default thread count to the number of performance cores detected on the system. Without this change, the new Go server winds up picking `runtime.NumCPU` from Go, which equates to logical processors, and that results in thrashing on hyperthreading CPUs and poor CPU inference speed.
We need to reduce down to just the number of cores in a single socket given current limitations in the C++ code.
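For context, a minimal sketch of the detection idea (my own illustration, not the code in this PR): count unique (physical id, core id) pairs per socket in `/proc/cpuinfo`. Note this alone does not separate performance from efficiency cores on hybrid CPUs, which needs extra handling.
```go
// Illustration only: derive the default thread count from physical cores
// in /proc/cpuinfo instead of runtime.NumCPU(), which counts logical
// processors and causes thrashing on hyperthreaded CPUs.
package main

import (
	"bufio"
	"fmt"
	"os"
	"runtime"
	"strings"
)

func main() {
	f, err := os.Open("/proc/cpuinfo")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	sockets := map[string]bool{} // distinct "physical id" values
	cores := map[string]bool{}   // distinct (physical id, core id) pairs
	var phys string

	sc := bufio.NewScanner(f)
	for sc.Scan() {
		key, val, ok := strings.Cut(sc.Text(), ":")
		if !ok {
			continue
		}
		switch strings.TrimSpace(key) {
		case "physical id": // listed before "core id" in each processor block
			phys = strings.TrimSpace(val)
			sockets[phys] = true
		case "core id":
			cores[phys+"/"+strings.TrimSpace(val)] = true
		}
	}
	fmt.Printf("sockets=%d physical cores=%d logical CPUs=%d\n",
		len(sockets), len(cores), runtime.NumCPU())
}
```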
Fixes #5554 | {
"login": "dhiltgen",
"id": 4033016,
"node_id": "MDQ6VXNlcjQwMzMwMTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/dhiltgen",
"html_url": "https://github.com/dhiltgen",
"followers_url": "https://api.github.com/users/dhiltgen/followers",
"following_url": "https://api.github.com/users/dhiltgen/following{/other_user}",
"gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions",
"organizations_url": "https://api.github.com/users/dhiltgen/orgs",
"repos_url": "https://api.github.com/users/dhiltgen/repos",
"events_url": "https://api.github.com/users/dhiltgen/events{/privacy}",
"received_events_url": "https://api.github.com/users/dhiltgen/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/6264/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/6264/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/5575 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5575/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5575/comments | https://api.github.com/repos/ollama/ollama/issues/5575/events | https://github.com/ollama/ollama/pull/5575 | 2,398,478,044 | PR_kwDOJ0Z1Ps502TqN | 5,575 | Update README.md | {
"login": "elearningshow",
"id": 766298,
"node_id": "MDQ6VXNlcjc2NjI5OA==",
"avatar_url": "https://avatars.githubusercontent.com/u/766298?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/elearningshow",
"html_url": "https://github.com/elearningshow",
"followers_url": "https://api.github.com/users/elearningshow/followers",
"following_url": "https://api.github.com/users/elearningshow/following{/other_user}",
"gists_url": "https://api.github.com/users/elearningshow/gists{/gist_id}",
"starred_url": "https://api.github.com/users/elearningshow/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/elearningshow/subscriptions",
"organizations_url": "https://api.github.com/users/elearningshow/orgs",
"repos_url": "https://api.github.com/users/elearningshow/repos",
"events_url": "https://api.github.com/users/elearningshow/events{/privacy}",
"received_events_url": "https://api.github.com/users/elearningshow/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2024-07-09T15:10:35 | 2024-11-21T08:31:27 | 2024-11-21T08:31:27 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/5575",
"html_url": "https://github.com/ollama/ollama/pull/5575",
"diff_url": "https://github.com/ollama/ollama/pull/5575.diff",
"patch_url": "https://github.com/ollama/ollama/pull/5575.patch",
"merged_at": "2024-11-21T08:31:27"
} | I have created an easy-to-use GUI in Python that would make a great addition to the Community Integrations, Web & Desktop section.
- [Ollama-Kis](https://github.com/elearningshow/ollama-kis) (A simple easy to use GUI with sample custom LLM for Drivers Education)
| {
"login": "mchiang0610",
"id": 3325447,
"node_id": "MDQ6VXNlcjMzMjU0NDc=",
"avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mchiang0610",
"html_url": "https://github.com/mchiang0610",
"followers_url": "https://api.github.com/users/mchiang0610/followers",
"following_url": "https://api.github.com/users/mchiang0610/following{/other_user}",
"gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions",
"organizations_url": "https://api.github.com/users/mchiang0610/orgs",
"repos_url": "https://api.github.com/users/mchiang0610/repos",
"events_url": "https://api.github.com/users/mchiang0610/events{/privacy}",
"received_events_url": "https://api.github.com/users/mchiang0610/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5575/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/5575/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/204 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/204/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/204/comments | https://api.github.com/repos/ollama/ollama/issues/204/events | https://github.com/ollama/ollama/issues/204 | 1,819,267,669 | I_kwDOJ0Z1Ps5sb9JV | 204 | Consider Using Standard Config Format | {
"login": "nazimamin",
"id": 4207188,
"node_id": "MDQ6VXNlcjQyMDcxODg=",
"avatar_url": "https://avatars.githubusercontent.com/u/4207188?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/nazimamin",
"html_url": "https://github.com/nazimamin",
"followers_url": "https://api.github.com/users/nazimamin/followers",
"following_url": "https://api.github.com/users/nazimamin/following{/other_user}",
"gists_url": "https://api.github.com/users/nazimamin/gists{/gist_id}",
"starred_url": "https://api.github.com/users/nazimamin/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/nazimamin/subscriptions",
"organizations_url": "https://api.github.com/users/nazimamin/orgs",
"repos_url": "https://api.github.com/users/nazimamin/repos",
"events_url": "https://api.github.com/users/nazimamin/events{/privacy}",
"received_events_url": "https://api.github.com/users/nazimamin/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5667396200,
"node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request",
"name": "feature request",
"color": "a2eeef",
"default": false,
"description": "New feature or request"
},
{
"id": 6100196012,
"node_id": "LA_kwDOJ0Z1Ps8AAAABa5marA",
"url": "https://api.github.com/repos/ollama/ollama/labels/feedback%20wanted",
"name": "feedback wanted",
"color": "0e8a16",
"default": false,
"description": ""
}
] | open | false | null | [] | null | 7 | 2023-07-24T22:58:37 | 2024-05-31T21:49:50 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | Thank you for your work, this is great and will be very helpful for the OSS community. The custom configuration file named "Modelfile" works well in the context of this project.
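To make the comparison concrete before the question below, here is a purely hypothetical sketch of how a Modelfile might look in TOML (all keys invented for illustration, not a real ollama format):
```toml
# hypothetical TOML rendering of a Modelfile (illustrative only)
from = "llama2"
system = "You are a helpful assistant."

[parameters]
temperature = 0.8
num_ctx = 4096
```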
I would like to discuss the possibility of using a standardized config format such as JSON5, TOML, YAML, or another similar standard. Those are battle-tested, easy to read and maintain, and have extensive tooling support. Using a custom config language makes it harder for people to adapt quickly and makes it hard to build tooling on top of it. Thoughts? | {
"login": "pdevine",
"id": 75239,
"node_id": "MDQ6VXNlcjc1MjM5",
"avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/pdevine",
"html_url": "https://github.com/pdevine",
"followers_url": "https://api.github.com/users/pdevine/followers",
"following_url": "https://api.github.com/users/pdevine/following{/other_user}",
"gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}",
"starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/pdevine/subscriptions",
"organizations_url": "https://api.github.com/users/pdevine/orgs",
"repos_url": "https://api.github.com/users/pdevine/repos",
"events_url": "https://api.github.com/users/pdevine/events{/privacy}",
"received_events_url": "https://api.github.com/users/pdevine/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/204/reactions",
"total_count": 5,
"+1": 4,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 1,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/204/timeline | null | reopened | false |
https://api.github.com/repos/ollama/ollama/issues/7569 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/7569/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/7569/comments | https://api.github.com/repos/ollama/ollama/issues/7569/events | https://github.com/ollama/ollama/issues/7569 | 2,643,207,265 | I_kwDOJ0Z1Ps6djCRh | 7,569 | I wanted to add the Donut LLM model, which seems not to be supported at the moment | {
"login": "KIC",
"id": 10957396,
"node_id": "MDQ6VXNlcjEwOTU3Mzk2",
"avatar_url": "https://avatars.githubusercontent.com/u/10957396?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/KIC",
"html_url": "https://github.com/KIC",
"followers_url": "https://api.github.com/users/KIC/followers",
"following_url": "https://api.github.com/users/KIC/following{/other_user}",
"gists_url": "https://api.github.com/users/KIC/gists{/gist_id}",
"starred_url": "https://api.github.com/users/KIC/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/KIC/subscriptions",
"organizations_url": "https://api.github.com/users/KIC/orgs",
"repos_url": "https://api.github.com/users/KIC/repos",
"events_url": "https://api.github.com/users/KIC/events{/privacy}",
"received_events_url": "https://api.github.com/users/KIC/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [
{
"id": 5789807732,
"node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA",
"url": "https://api.github.com/repos/ollama/ollama/labels/model%20request",
"name": "model request",
"color": "1E5DE6",
"default": false,
"description": "Model requests"
}
] | open | false | null | [] | null | 0 | 2024-11-08T08:01:23 | 2024-11-08T08:01:23 | null | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | After cloning https://huggingface.co/docs/transformers/en/model_doc/donut, I tried to run
`docker run --rm -v .:/model ollama/quantize -q q8_0 /model`
but it fails with:
`unknown architecture VisionEncoderDecoderModel`
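For reference, the architecture string the quantizer rejects comes from the model's `config.json`. A standalone sketch (not part of ollama's tooling) that prints it:
```go
// Standalone sketch: print the "architectures" field from the cloned
// model's config.json, i.e. the value the quantizer is rejecting.
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	data, err := os.ReadFile("config.json")
	if err != nil {
		panic(err)
	}
	var cfg struct {
		Architectures []string `json:"architectures"`
	}
	if err := json.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Architectures) // [VisionEncoderDecoderModel] for Donut
}
```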
I think one can never have enough vision models, so please add support for Donut models and their fine-tuned variants.
| null | {
"url": "https://api.github.com/repos/ollama/ollama/issues/7569/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/7569/timeline | null | null | false |
https://api.github.com/repos/ollama/ollama/issues/1503 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/1503/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/1503/comments | https://api.github.com/repos/ollama/ollama/issues/1503/events | https://github.com/ollama/ollama/issues/1503 | 2,039,983,770 | I_kwDOJ0Z1Ps55l66a | 1,503 | Invalid Opcode Error in Ubuntu Server | {
"login": "Gyarados",
"id": 5567681,
"node_id": "MDQ6VXNlcjU1Njc2ODE=",
"avatar_url": "https://avatars.githubusercontent.com/u/5567681?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Gyarados",
"html_url": "https://github.com/Gyarados",
"followers_url": "https://api.github.com/users/Gyarados/followers",
"following_url": "https://api.github.com/users/Gyarados/following{/other_user}",
"gists_url": "https://api.github.com/users/Gyarados/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Gyarados/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Gyarados/subscriptions",
"organizations_url": "https://api.github.com/users/Gyarados/orgs",
"repos_url": "https://api.github.com/users/Gyarados/repos",
"events_url": "https://api.github.com/users/Gyarados/events{/privacy}",
"received_events_url": "https://api.github.com/users/Gyarados/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 3 | 2023-12-13T15:43:36 | 2023-12-13T17:28:38 | 2023-12-13T17:28:37 | NONE | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | null | null | When trying to run any model on Ubuntu Server, both locally and in a container, I get the following messages in the Ollama logs:
```
$ journalctl -u ollama -f
Dec 13 15:28:54 desimachine ollama[1471335]: 2023/12/13 15:28:54 download.go:123: downloading 58e1b82a691f in 1 18 B part(s)
Dec 13 15:28:58 desimachine ollama[1471335]: 2023/12/13 15:28:58 download.go:123: downloading 658e00cf526b in 1 529 B part(s)
Dec 13 15:29:09 desimachine ollama[1471335]: [GIN] 2023/12/13 - 15:29:09 | 200 | 2m53s | 127.0.0.1 | POST "/api/pull"
Dec 13 15:29:10 desimachine ollama[1471335]: 2023/12/13 15:29:10 llama.go:397: skipping accelerated runner because num_gpu=0
Dec 13 15:29:10 desimachine ollama[1471335]: 2023/12/13 15:29:10 llama.go:434: starting llama runner
Dec 13 15:29:10 desimachine ollama[1471335]: 2023/12/13 15:29:10 llama.go:492: waiting for llama runner to start responding
Dec 13 15:29:10 desimachine ollama[1471335]: 2023/12/13 15:29:10 llama.go:449: signal: illegal instruction (core dumped)
Dec 13 15:29:10 desimachine ollama[1471335]: 2023/12/13 15:29:10 llama.go:457: error starting llama runner: llama runner process has terminated
Dec 13 15:29:10 desimachine ollama[1471335]: 2023/12/13 15:29:10 llama.go:523: llama runner stopped successfully
```
And this is the log from the kernel:
```
$ sudo dmesg
...
[67864.232068] traps: ollama-runner[1485327] trap invalid opcode ip:5080dc sp:7ffd98094950 error:0 in ollama-runner[408000+16d000]
...
```
This is my OS version:
```
$ lsb_release -a
No LSB modules are available.
Distributor ID: Ubuntu
Description: Ubuntu 22.04.3 LTS
Release: 22.04
Codename: jammy
```
My CPU is an Intel Celeron N4020.
I couldn't find much information about this online, other than that this kernel message signals an invalid opcode: the binary executed an instruction that this CPU does not implement.
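The Celeron N4020 lacks the AVX/AVX2 extensions that prebuilt binaries commonly assume, which would produce exactly this trap. A diagnostic sketch (not part of ollama) to list what the CPU reports:
```go
// Diagnostic sketch: print the SSE/AVX feature flags from /proc/cpuinfo.
// On a Celeron N4020 no avx* flags are expected, consistent with an
// invalid-opcode trap from a binary built assuming AVX.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("/proc/cpuinfo")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := sc.Text()
		if !strings.HasPrefix(line, "flags") {
			continue
		}
		for _, flag := range strings.Fields(line) {
			if strings.HasPrefix(flag, "sse") || strings.HasPrefix(flag, "avx") {
				fmt.Println(flag)
			}
		}
		return
	}
}
```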
I also have a Windows laptop with an i7 where ollama worked perfectly using Docker.
Any tips? | {
"login": "Gyarados",
"id": 5567681,
"node_id": "MDQ6VXNlcjU1Njc2ODE=",
"avatar_url": "https://avatars.githubusercontent.com/u/5567681?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Gyarados",
"html_url": "https://github.com/Gyarados",
"followers_url": "https://api.github.com/users/Gyarados/followers",
"following_url": "https://api.github.com/users/Gyarados/following{/other_user}",
"gists_url": "https://api.github.com/users/Gyarados/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Gyarados/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Gyarados/subscriptions",
"organizations_url": "https://api.github.com/users/Gyarados/orgs",
"repos_url": "https://api.github.com/users/Gyarados/repos",
"events_url": "https://api.github.com/users/Gyarados/events{/privacy}",
"received_events_url": "https://api.github.com/users/Gyarados/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/1503/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/1503/timeline | null | completed | false |
https://api.github.com/repos/ollama/ollama/issues/5626 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/5626/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/5626/comments | https://api.github.com/repos/ollama/ollama/issues/5626/events | https://github.com/ollama/ollama/pull/5626 | 2,402,232,109 | PR_kwDOJ0Z1Ps51C7KL | 5,626 | sched: error on over-allocation of system memory when on Linux | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2024-07-11T04:40:08 | 2024-07-11T07:53:14 | 2024-07-11T07:53:12 | MEMBER | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/5626",
"html_url": "https://github.com/ollama/ollama/pull/5626",
"diff_url": "https://github.com/ollama/ollama/pull/5626.diff",
"patch_url": "https://github.com/ollama/ollama/pull/5626.patch",
"merged_at": "2024-07-11T07:53:12"
} | Model switching no longer works on CPU-only machines; the scheduler instead fails with a `requested model is too large for this system` error:
```
$ ollama run gemma2
Error: requested model (8.4 GiB) is too large for this system (1.9 GiB)
```
This PR changes that behavior so a new model is stopped from loading only when over-allocating system memory on Linux would cause a crash. It also moves the check to after scheduling has taken place, so no error is raised before knowing whether another model would be unloaded.
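For illustration, the guard might look roughly like the sketch below; the function name, signature, and GiB formatting are assumptions for this example, not the actual diff:
```go
// Hedged sketch of the scheduling-time guard described above.
package main

import (
	"fmt"
	"runtime"
)

// humanGiB renders a byte count as GiB, mirroring the error strings shown.
func humanGiB(b uint64) string {
	return fmt.Sprintf("%.1f GiB", float64(b)/(1<<30))
}

// checkSystemMemory refuses a load only when, on Linux, the model would
// over-allocate system memory and crash the process.
func checkSystemMemory(required, available uint64) error {
	if runtime.GOOS == "linux" && required > available {
		return fmt.Errorf("requested model requires more system memory (%s) than is available (%s)",
			humanGiB(required), humanGiB(available))
	}
	return nil
}

func main() {
	required := 86.8 * (1 << 30)  // float64 byte counts, as in the example below
	available := 62.5 * (1 << 30)
	// Prints the error on Linux, <nil> elsewhere.
	fmt.Println(checkSystemMemory(uint64(required), uint64(available)))
}
```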
Example on a 48 GB VRAM system with 64 GB of system memory:
```
$ ollama run llama3:70b-instruct-fp16
Error: requested model requires more system memory (86.8 GiB) than is available (62.5 GiB)
``` | {
"login": "jmorganca",
"id": 251292,
"node_id": "MDQ6VXNlcjI1MTI5Mg==",
"avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jmorganca",
"html_url": "https://github.com/jmorganca",
"followers_url": "https://api.github.com/users/jmorganca/followers",
"following_url": "https://api.github.com/users/jmorganca/following{/other_user}",
"gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions",
"organizations_url": "https://api.github.com/users/jmorganca/orgs",
"repos_url": "https://api.github.com/users/jmorganca/repos",
"events_url": "https://api.github.com/users/jmorganca/events{/privacy}",
"received_events_url": "https://api.github.com/users/jmorganca/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/5626/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/5626/timeline | null | null | true |
https://api.github.com/repos/ollama/ollama/issues/529 | https://api.github.com/repos/ollama/ollama | https://api.github.com/repos/ollama/ollama/issues/529/labels{/name} | https://api.github.com/repos/ollama/ollama/issues/529/comments | https://api.github.com/repos/ollama/ollama/issues/529/events | https://github.com/ollama/ollama/pull/529 | 1,896,668,950 | PR_kwDOJ0Z1Ps5aWTfc | 529 | add examples of streaming in python and node | {
"login": "technovangelist",
"id": 633681,
"node_id": "MDQ6VXNlcjYzMzY4MQ==",
"avatar_url": "https://avatars.githubusercontent.com/u/633681?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/technovangelist",
"html_url": "https://github.com/technovangelist",
"followers_url": "https://api.github.com/users/technovangelist/followers",
"following_url": "https://api.github.com/users/technovangelist/following{/other_user}",
"gists_url": "https://api.github.com/users/technovangelist/gists{/gist_id}",
"starred_url": "https://api.github.com/users/technovangelist/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/technovangelist/subscriptions",
"organizations_url": "https://api.github.com/users/technovangelist/orgs",
"repos_url": "https://api.github.com/users/technovangelist/repos",
"events_url": "https://api.github.com/users/technovangelist/events{/privacy}",
"received_events_url": "https://api.github.com/users/technovangelist/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2023-09-14T14:13:26 | 2023-09-18T16:53:41 | 2023-09-18T16:53:41 | CONTRIBUTOR | {
"total": 0,
"completed": 0,
"percent_completed": 0
} | null | false | {
"url": "https://api.github.com/repos/ollama/ollama/pulls/529",
"html_url": "https://github.com/ollama/ollama/pull/529",
"diff_url": "https://github.com/ollama/ollama/pull/529.diff",
"patch_url": "https://github.com/ollama/ollama/pull/529.patch",
"merged_at": null
} | null | {
"login": "mchiang0610",
"id": 3325447,
"node_id": "MDQ6VXNlcjMzMjU0NDc=",
"avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mchiang0610",
"html_url": "https://github.com/mchiang0610",
"followers_url": "https://api.github.com/users/mchiang0610/followers",
"following_url": "https://api.github.com/users/mchiang0610/following{/other_user}",
"gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions",
"organizations_url": "https://api.github.com/users/mchiang0610/orgs",
"repos_url": "https://api.github.com/users/mchiang0610/repos",
"events_url": "https://api.github.com/users/mchiang0610/events{/privacy}",
"received_events_url": "https://api.github.com/users/mchiang0610/received_events",
"type": "User",
"user_view_type": "public",
"site_admin": false
} | {
"url": "https://api.github.com/repos/ollama/ollama/issues/529/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/ollama/ollama/issues/529/timeline | null | null | true |