github-actions[bot] commited on
Commit
71759d6
Β·
0 Parent(s):

Sync from https://github.com/felladrin/MiniSearch

Browse files
This view is limited to 50 files because it contains too many changes. Β  See raw diff
Files changed (50) hide show
  1. .dockerignore +25 -0
  2. .editorconfig +7 -0
  3. .env.example +33 -0
  4. .github/workflows/ai-review.yml +129 -0
  5. .github/workflows/deploy.yml +54 -0
  6. .github/workflows/on-pull-request-to-main.yml +9 -0
  7. .github/workflows/on-push-to-main.yml +7 -0
  8. .github/workflows/reusable-test-lint-ping.yml +25 -0
  9. .github/workflows/update-searxng-docker-image.yml +44 -0
  10. .gitignore +7 -0
  11. .husky/pre-commit +1 -0
  12. .npmrc +1 -0
  13. Dockerfile +107 -0
  14. README.md +139 -0
  15. biome.json +30 -0
  16. client/components/AiResponse/AiModelDownloadAllowanceContent.tsx +62 -0
  17. client/components/AiResponse/AiResponseContent.tsx +209 -0
  18. client/components/AiResponse/AiResponseSection.tsx +105 -0
  19. client/components/AiResponse/ChatInterface.tsx +186 -0
  20. client/components/AiResponse/CopyIconButton.tsx +32 -0
  21. client/components/AiResponse/FormattedMarkdown.tsx +118 -0
  22. client/components/AiResponse/LoadingModelContent.tsx +40 -0
  23. client/components/AiResponse/PreparingContent.tsx +33 -0
  24. client/components/AiResponse/WebLlmModelSelect.tsx +81 -0
  25. client/components/AiResponse/WllamaModelSelect.tsx +42 -0
  26. client/components/App/App.tsx +94 -0
  27. client/components/Logs/LogsModal.tsx +101 -0
  28. client/components/Logs/ShowLogsButton.tsx +42 -0
  29. client/components/Pages/AccessPage.tsx +61 -0
  30. client/components/Pages/Main/MainPage.tsx +60 -0
  31. client/components/Pages/Main/Menu/AISettings/components/AIParameterSlider.tsx +17 -0
  32. client/components/Pages/Main/Menu/AISettings/components/BrowserSettings.tsx +60 -0
  33. client/components/Pages/Main/Menu/AISettings/components/HordeSettings.tsx +44 -0
  34. client/components/Pages/Main/Menu/AISettings/components/OpenAISettings.tsx +57 -0
  35. client/components/Pages/Main/Menu/AISettings/components/SystemPromptInput.tsx +98 -0
  36. client/components/Pages/Main/Menu/AISettings/hooks/useHordeModels.ts +35 -0
  37. client/components/Pages/Main/Menu/AISettings/hooks/useHordeUserInfo.ts +43 -0
  38. client/components/Pages/Main/Menu/AISettings/hooks/useOpenAiModels.ts +52 -0
  39. client/components/Pages/Main/Menu/AISettings/index.tsx +128 -0
  40. client/components/Pages/Main/Menu/AISettings/types.ts +28 -0
  41. client/components/Pages/Main/Menu/AISettingsForm.tsx +492 -0
  42. client/components/Pages/Main/Menu/ActionsForm.tsx +18 -0
  43. client/components/Pages/Main/Menu/ClearDataButton.tsx +63 -0
  44. client/components/Pages/Main/Menu/InterfaceSettingsForm.tsx +45 -0
  45. client/components/Pages/Main/Menu/MenuButton.tsx +53 -0
  46. client/components/Pages/Main/Menu/MenuDrawer.tsx +116 -0
  47. client/components/Pages/Main/Menu/SearchSettingsForm.tsx +52 -0
  48. client/components/Pages/Main/Menu/VoiceSettingsForm.tsx +72 -0
  49. client/components/Search/Form/SearchForm.tsx +131 -0
  50. client/components/Search/Results/Graphical/ImageResultsList.tsx +122 -0
.dockerignore ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Logs
2
+ logs
3
+ *.log
4
+ npm-debug.log*
5
+ yarn-debug.log*
6
+ yarn-error.log*
7
+ pnpm-debug.log*
8
+ lerna-debug.log*
9
+
10
+ node_modules
11
+ dist
12
+ dist-ssr
13
+ *.local
14
+
15
+ # Editor directories and files
16
+ .vscode/*
17
+ !.vscode/extensions.json
18
+ .idea
19
+ .DS_Store
20
+ *.suo
21
+ *.ntvs*
22
+ *.njsproj
23
+ *.sln
24
+ *.sw?
25
+
.editorconfig ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ [*]
2
+ charset = utf-8
3
+ insert_final_newline = true
4
+ end_of_line = lf
5
+ indent_style = space
6
+ indent_size = 2
7
+ max_line_length = 80
.env.example ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # A comma-separated list of access keys. Example: `ACCESS_KEYS="ABC123,JUD71F,HUWE3"`. Leave blank for unrestricted access.
2
+ ACCESS_KEYS=""
3
+
4
+ # The timeout in hours for access key validation. Set to 0 to require validation on every page load.
5
+ ACCESS_KEY_TIMEOUT_HOURS="24"
6
+
7
+ # The default model ID for WebLLM with F16 shaders.
8
+ WEBLLM_DEFAULT_F16_MODEL_ID="Qwen2.5-0.5B-Instruct-q4f16_1-MLC"
9
+
10
+ # The default model ID for WebLLM with F32 shaders.
11
+ WEBLLM_DEFAULT_F32_MODEL_ID="Qwen2.5-0.5B-Instruct-q4f32_1-MLC"
12
+
13
+ # The default model ID for Wllama.
14
+ WLLAMA_DEFAULT_MODEL_ID="qwen-2.5-0.5b"
15
+
16
+ # The base URL for the internal OpenAI compatible API. Example: `INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL="https://api.openai.com/v1"`. Leave blank to disable internal OpenAI compatible API.
17
+ INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL=""
18
+
19
+ # The access key for the internal OpenAI compatible API.
20
+ INTERNAL_OPENAI_COMPATIBLE_API_KEY=""
21
+
22
+ # The model for the internal OpenAI compatible API.
23
+ INTERNAL_OPENAI_COMPATIBLE_API_MODEL=""
24
+
25
+ # The name of the internal OpenAI compatible API, displayed in the UI.
26
+ INTERNAL_OPENAI_COMPATIBLE_API_NAME="Internal API"
27
+
28
+ # The type of inference to use by default. The possible values are:
29
+ # "browser" -> In the browser (Private)
30
+ # "openai" -> Remote Server (API)
31
+ # "horde" -> AI Horde (Pre-configured)
32
+ # "internal" -> $INTERNAL_OPENAI_COMPATIBLE_API_NAME
33
+ DEFAULT_INFERENCE_TYPE="browser"
.github/workflows/ai-review.yml ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Review Pull Request with AI
2
+
3
+ on:
4
+ pull_request:
5
+ types: [opened, synchronize, reopened]
6
+ branches: ["main"]
7
+
8
+ concurrency:
9
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
10
+ cancel-in-progress: true
11
+
12
+ jobs:
13
+ ai-review:
14
+ if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip-ai-review') }}
15
+ continue-on-error: true
16
+ runs-on: ubuntu-latest
17
+ name: AI Review
18
+ permissions:
19
+ pull-requests: write
20
+ contents: read
21
+ timeout-minutes: 30
22
+ env:
23
+ OPENROUTER_MODEL: deepseek/deepseek-chat
24
+ steps:
25
+ - name: Checkout Repository
26
+ uses: actions/checkout@v4
27
+
28
+ - name: Create temporary directory
29
+ run: mkdir -p /tmp/pr_review
30
+
31
+ - name: Process PR description
32
+ id: process_pr
33
+ run: |
34
+ PR_BODY_ESCAPED=$(cat << 'EOF'
35
+ ${{ github.event.pull_request.body }}
36
+ EOF
37
+ )
38
+ PROCESSED_BODY=$(echo "$PR_BODY_ESCAPED" | sed -E 's/\[(.*?)\]\(.*?\)/\1/g')
39
+ echo "$PROCESSED_BODY" > /tmp/pr_review/processed_body.txt
40
+
41
+ - name: Fetch branches and output the diff
42
+ run: |
43
+ git fetch origin main:main
44
+ git fetch origin pull/${{ github.event.pull_request.number }}/head:pr-branch
45
+ git diff main..pr-branch > /tmp/pr_review/diff.txt
46
+
47
+ - name: Prepare review request
48
+ id: prepare_request
49
+ run: |
50
+ PR_TITLE=$(echo "${{ github.event.pull_request.title }}" | sed 's/[()]/\\&/g')
51
+ DIFF_CONTENT=$(cat /tmp/pr_review/diff.txt)
52
+ PROCESSED_BODY=$(cat /tmp/pr_review/processed_body.txt)
53
+
54
+ jq -n \
55
+ --arg model "$OPENROUTER_MODEL" \
56
+ --arg system "You are an experienced developer reviewing a Pull Request. You focus only on what matters and provide concise, actionable feedback.
57
+
58
+ Review Context:
59
+ Repository Name: \"${{ github.event.repository.name }}\"
60
+ Repository Description: \"${{ github.event.repository.description }}\"
61
+ Branch: \"${{ github.event.pull_request.head.ref }}\"
62
+ PR Title: \"$PR_TITLE\"
63
+
64
+ Guidelines:
65
+ 1. Only comment on issues that:
66
+ - Could cause bugs or security issues
67
+ - Significantly impact performance
68
+ - Make the code harder to maintain
69
+ - Violate critical best practices
70
+
71
+ 2. For each issue:
72
+ - Point to the specific line/file
73
+ - Explain why it's a problem
74
+ - Suggest a concrete fix
75
+
76
+ 3. Praise exceptional solutions briefly, only if truly innovative
77
+
78
+ 4. Skip commenting on:
79
+ - Minor style issues
80
+ - Obvious changes
81
+ - Working code that could be marginally improved
82
+ - Things that are just personal preference
83
+
84
+ Remember:
85
+ Less is more. If the code is good and working, just say so, with a short message." \
86
+ --arg user "This is the description of the pull request:
87
+ \`\`\`markdown
88
+ $PROCESSED_BODY
89
+ \`\`\`
90
+
91
+ And here is the diff of the changes, for you to review:
92
+ \`\`\`diff
93
+ $DIFF_CONTENT
94
+ \`\`\`" \
95
+ '{
96
+ "model": $model,
97
+ "messages": [
98
+ {"role": "system", "content": $system},
99
+ {"role": "user", "content": $user}
100
+ ]
101
+ }' > /tmp/pr_review/request.json
102
+
103
+ - name: Get AI Review
104
+ id: ai_review
105
+ run: |
106
+ RESPONSE=$(curl -s https://openrouter.ai/api/v1/chat/completions \
107
+ -H "Content-Type: application/json" \
108
+ -H "Authorization: Bearer ${{ secrets.OPENROUTER_API_KEY }}" \
109
+ -d @/tmp/pr_review/request.json)
110
+
111
+ echo "### Review" > /tmp/pr_review/response.txt
112
+ echo "" >> /tmp/pr_review/response.txt
113
+ echo "$RESPONSE" | jq -r '.choices[0].message.content' >> /tmp/pr_review/response.txt
114
+
115
+ - name: Find Comment
116
+ uses: peter-evans/find-comment@v3
117
+ id: find_comment
118
+ with:
119
+ issue-number: ${{ github.event.pull_request.number }}
120
+ comment-author: "github-actions[bot]"
121
+ body-includes: "### Review"
122
+
123
+ - name: Post or Update PR Review
124
+ uses: peter-evans/create-or-update-comment@v4
125
+ with:
126
+ comment-id: ${{ steps.find_comment.outputs.comment-id }}
127
+ issue-number: ${{ github.event.pull_request.number }}
128
+ body-path: /tmp/pr_review/response.txt
129
+ edit-mode: replace
.github/workflows/deploy.yml ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Deploy
2
+
3
+ on:
4
+ workflow_dispatch:
5
+
6
+ jobs:
7
+ build-and-push-image:
8
+ name: Publish Docker image to GitHub Packages
9
+ runs-on: ubuntu-latest
10
+ env:
11
+ REGISTRY: ghcr.io
12
+ IMAGE_NAME: ${{ github.repository }}
13
+ permissions:
14
+ contents: read
15
+ packages: write
16
+ steps:
17
+ - name: Checkout repository
18
+ uses: actions/checkout@v4
19
+ - name: Log in to the Container registry
20
+ uses: docker/login-action@v3
21
+ with:
22
+ registry: ${{ env.REGISTRY }}
23
+ username: ${{ github.actor }}
24
+ password: ${{ secrets.GITHUB_TOKEN }}
25
+ - name: Extract metadata (tags, labels) for Docker
26
+ id: meta
27
+ uses: docker/metadata-action@v5
28
+ with:
29
+ images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
30
+ - name: Set up Docker Buildx
31
+ uses: docker/setup-buildx-action@v3
32
+ - name: Build and push Docker image
33
+ uses: docker/build-push-action@v6
34
+ with:
35
+ context: .
36
+ push: true
37
+ tags: ${{ steps.meta.outputs.tags }}
38
+ labels: ${{ steps.meta.outputs.labels }}
39
+ platforms: linux/amd64,linux/arm64
40
+
41
+ sync-to-hf:
42
+ name: Sync to HuggingFace Spaces
43
+ runs-on: ubuntu-latest
44
+ steps:
45
+ - uses: actions/checkout@v4
46
+ with:
47
+ lfs: true
48
+ - uses: JacobLinCool/huggingface-sync@v1
49
+ with:
50
+ github: ${{ secrets.GITHUB_TOKEN }}
51
+ user: ${{ vars.HF_SPACE_OWNER }}
52
+ space: ${{ vars.HF_SPACE_NAME }}
53
+ token: ${{ secrets.HF_TOKEN }}
54
+ configuration: "hf-space-config.yml"
.github/workflows/on-pull-request-to-main.yml ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ name: On Pull Request To Main
2
+ on:
3
+ pull_request:
4
+ types: [opened, synchronize, reopened]
5
+ branches: ["main"]
6
+ jobs:
7
+ test-lint-ping:
8
+ if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip-test-lint-ping') }}
9
+ uses: ./.github/workflows/reusable-test-lint-ping.yml
.github/workflows/on-push-to-main.yml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ name: On Push To Main
2
+ on:
3
+ push:
4
+ branches: ["main"]
5
+ jobs:
6
+ test-lint-ping:
7
+ uses: ./.github/workflows/reusable-test-lint-ping.yml
.github/workflows/reusable-test-lint-ping.yml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ on:
2
+ workflow_call:
3
+ jobs:
4
+ check-code-quality:
5
+ name: Check Code Quality
6
+ runs-on: ubuntu-latest
7
+ steps:
8
+ - uses: actions/checkout@v4
9
+ - uses: actions/setup-node@v4
10
+ with:
11
+ node-version: 20
12
+ cache: "npm"
13
+ - run: npm ci --ignore-scripts
14
+ - run: npm test
15
+ - run: npm run lint
16
+ check-docker-container:
17
+ needs: [check-code-quality]
18
+ name: Check Docker Container
19
+ runs-on: ubuntu-latest
20
+ steps:
21
+ - uses: actions/checkout@v4
22
+ - run: docker compose -f docker-compose.production.yml up -d
23
+ - name: Check if main page is available
24
+ run: until curl -s -o /dev/null -w "%{http_code}" localhost:7860 | grep 200; do sleep 1; done
25
+ - run: docker compose -f docker-compose.production.yml down
.github/workflows/update-searxng-docker-image.yml ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Update SearXNG Docker Image
2
+
3
+ on:
4
+ schedule:
5
+ - cron: "0 14 * * *"
6
+ workflow_dispatch:
7
+
8
+ permissions:
9
+ contents: write
10
+
11
+ jobs:
12
+ update-searxng-image:
13
+ runs-on: ubuntu-latest
14
+ steps:
15
+ - name: Checkout code
16
+ uses: actions/checkout@v4
17
+ with:
18
+ token: ${{ secrets.GITHUB_TOKEN }}
19
+
20
+ - name: Get latest SearXNG image tag
21
+ id: get_latest_tag
22
+ run: |
23
+ LATEST_TAG=$(curl -s "https://hub.docker.com/v2/repositories/searxng/searxng/tags/?page_size=3&ordering=last_updated" | jq -r '.results[] | select(.name != "latest-build-cache" and .name != "latest") | .name' | head -n 1)
24
+ echo "LATEST_TAG=${LATEST_TAG}" >> $GITHUB_OUTPUT
25
+
26
+ - name: Update Dockerfile
27
+ run: |
28
+ sed -i 's|FROM searxng/searxng:.*|FROM searxng/searxng:${{ steps.get_latest_tag.outputs.LATEST_TAG }}|' Dockerfile
29
+
30
+ - name: Check for changes
31
+ id: git_status
32
+ run: |
33
+ git diff --exit-code || echo "changes=true" >> $GITHUB_OUTPUT
34
+
35
+ - name: Commit and push if changed
36
+ if: steps.git_status.outputs.changes == 'true'
37
+ run: |
38
+ git config --local user.email "github-actions[bot]@users.noreply.github.com"
39
+ git config --local user.name "github-actions[bot]"
40
+ git add Dockerfile
41
+ git commit -m "Update SearXNG Docker image to tag ${{ steps.get_latest_tag.outputs.LATEST_TAG }}"
42
+ git push
43
+ env:
44
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.gitignore ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ node_modules
2
+ .DS_Store
3
+ /client/dist
4
+ /server/models
5
+ .vscode
6
+ /vite-build-stats.html
7
+ .env
.husky/pre-commit ADDED
@@ -0,0 +1 @@
 
 
1
+ npx lint-staged
.npmrc ADDED
@@ -0,0 +1 @@
 
 
1
+ legacy-peer-deps = true
Dockerfile ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Build llama.cpp in a separate stage
2
+ FROM alpine:3.21 AS llama-builder
3
+
4
+ # Install build dependencies
5
+ RUN apk add --update \
6
+ build-base \
7
+ cmake \
8
+ ccache \
9
+ git
10
+
11
+ # Build llama.cpp server and collect libraries
12
+ RUN cd /tmp && \
13
+ git clone https://github.com/ggerganov/llama.cpp.git --depth=1 && \
14
+ cd llama.cpp && \
15
+ cmake -B build -DGGML_NATIVE=OFF && \
16
+ cmake --build build --config Release -j --target llama-server && \
17
+ mkdir -p /usr/local/lib/llama && \
18
+ find build -type f \( -name "libllama.so" -o -name "libggml.so" -o -name "libggml-base.so" -o -name "libggml-cpu.so" \) -exec cp {} /usr/local/lib/llama/ \;
19
+
20
+ # Use the SearXNG image as the base for final image
21
+ FROM searxng/searxng:2025.2.9-a1e2b2546
22
+
23
+ # Set the default port to 7860 if not provided
24
+ ENV PORT=7860
25
+
26
+ # Expose the port specified by the PORT environment variable
27
+ EXPOSE $PORT
28
+
29
+ # Install necessary packages using Alpine's package manager
30
+ RUN apk add --update \
31
+ nodejs \
32
+ npm \
33
+ git \
34
+ build-base
35
+
36
+ # Copy llama.cpp artifacts from builder
37
+ COPY --from=llama-builder /tmp/llama.cpp/build/bin/llama-server /usr/local/bin/
38
+ COPY --from=llama-builder /usr/local/lib/llama/* /usr/local/lib/
39
+ RUN ldconfig /usr/local/lib
40
+
41
+ # Set the SearXNG settings folder path
42
+ ARG SEARXNG_SETTINGS_FOLDER=/etc/searxng
43
+
44
+ # Modify SearXNG configuration:
45
+ # 1. Change output format from HTML to JSON
46
+ # 2. Remove user switching in the entrypoint script
47
+ # 3. Create and set permissions for the settings folder
48
+ RUN sed -i 's/- html/- json/' /usr/local/searxng/searx/settings.yml \
49
+ && sed -i 's/su-exec searxng:searxng //' /usr/local/searxng/dockerfiles/docker-entrypoint.sh \
50
+ && mkdir -p ${SEARXNG_SETTINGS_FOLDER} \
51
+ && chmod 777 ${SEARXNG_SETTINGS_FOLDER}
52
+
53
+ # Set up user and directory structure
54
+ ARG USERNAME=user
55
+ ARG HOME_DIR=/home/${USERNAME}
56
+ ARG APP_DIR=${HOME_DIR}/app
57
+
58
+ # Create a non-root user and set up the application directory
59
+ RUN adduser -D -u 1000 ${USERNAME} \
60
+ && mkdir -p ${APP_DIR} \
61
+ && chown -R ${USERNAME}:${USERNAME} ${HOME_DIR}
62
+
63
+ # Switch to the non-root user
64
+ USER ${USERNAME}
65
+
66
+ # Set the working directory to the application directory
67
+ WORKDIR ${APP_DIR}
68
+
69
+ # Define environment variables that can be passed to the container during build.
70
+ # This approach allows for dynamic configuration without relying on a `.env` file,
71
+ # which might not be suitable for all deployment scenarios.
72
+ ARG ACCESS_KEYS
73
+ ARG ACCESS_KEY_TIMEOUT_HOURS
74
+ ARG WEBLLM_DEFAULT_F16_MODEL_ID
75
+ ARG WEBLLM_DEFAULT_F32_MODEL_ID
76
+ ARG WLLAMA_DEFAULT_MODEL_ID
77
+ ARG INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL
78
+ ARG INTERNAL_OPENAI_COMPATIBLE_API_KEY
79
+ ARG INTERNAL_OPENAI_COMPATIBLE_API_MODEL
80
+ ARG INTERNAL_OPENAI_COMPATIBLE_API_NAME
81
+ ARG DEFAULT_INFERENCE_TYPE
82
+ ARG HOST
83
+ ARG HMR_PORT
84
+ ARG ALLOWED_HOSTS
85
+
86
+ # Copy package.json, package-lock.json, and .npmrc files
87
+ COPY --chown=${USERNAME}:${USERNAME} ./package.json ./package.json
88
+ COPY --chown=${USERNAME}:${USERNAME} ./package-lock.json ./package-lock.json
89
+ COPY --chown=${USERNAME}:${USERNAME} ./.npmrc ./.npmrc
90
+
91
+ # Install Node.js dependencies
92
+ RUN npm ci
93
+
94
+ # Copy the rest of the application files
95
+ COPY --chown=${USERNAME}:${USERNAME} . .
96
+
97
+ # Configure Git to treat the app directory as safe
98
+ RUN git config --global --add safe.directory ${APP_DIR}
99
+
100
+ # Build the application
101
+ RUN npm run build
102
+
103
+ # Set the entrypoint to use a shell
104
+ ENTRYPOINT [ "/bin/sh", "-c" ]
105
+
106
+ # Run SearXNG in the background and start the Node.js application using PM2
107
+ CMD [ "(/usr/local/searxng/dockerfiles/docker-entrypoint.sh -f > /dev/null 2>&1) & (npx pm2 start ecosystem.config.cjs && npx pm2 logs production-server)" ]
README.md ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: MiniSearch
3
+ emoji: πŸ‘ŒπŸ”
4
+ colorFrom: yellow
5
+ colorTo: yellow
6
+ sdk: docker
7
+ short_description: Minimalist web-searching app with browser-based AI assistant
8
+ pinned: true
9
+ custom_headers:
10
+ cross-origin-embedder-policy: require-corp
11
+ cross-origin-opener-policy: same-origin
12
+ cross-origin-resource-policy: cross-origin
13
+ ---
14
+
15
+ # MiniSearch
16
+
17
+ A minimalist web-searching app with an AI assistant that runs directly from your browser.
18
+
19
+ Live demo: https://felladrin-minisearch.hf.space
20
+
21
+ ## Screenshot
22
+
23
+ ![MiniSearch Screenshot](https://github.com/user-attachments/assets/f8d72a8e-a725-42e9-9358-e6ebade2acb2)
24
+
25
+ ## Features
26
+
27
+ - **Privacy-focused**: [No tracking, no ads, no data collection](https://docs.searxng.org/own-instance.html#how-does-searxng-protect-privacy)
28
+ - **Easy to use**: Minimalist yet intuitive interface for all users
29
+ - **Cross-platform**: Models run inside the browser, both on desktop and mobile
30
+ - **Integrated**: Search from the browser address bar by setting it as the default search engine
31
+ - **Efficient**: Models are loaded and cached only when needed
32
+ - **Customizable**: Tweakable settings for search results and text generation
33
+ - **Open-source**: [The code is available for inspection and contribution at GitHub](https://github.com/felladrin/MiniSearch)
34
+
35
+ ## Prerequisites
36
+
37
+ - [Docker](https://docs.docker.com/get-docker/)
38
+
39
+ ## Getting started
40
+
41
+ Here are the easiest ways to get started with MiniSearch. Pick the one that suits you best.
42
+
43
+ **Option 1** - Use [MiniSearch's Docker Image](https://github.com/felladrin/MiniSearch/pkgs/container/minisearch) by running in your terminal:
44
+
45
+ ```bash
46
+ docker run -p 7860:7860 ghcr.io/felladrin/minisearch:main
47
+ ```
48
+
49
+ **Option 2** - Add MiniSearch's Docker Image to your existing Docker Compose file:
50
+
51
+ ```yaml
52
+ services:
53
+ minisearch:
54
+ image: ghcr.io/felladrin/minisearch:main
55
+ ports:
56
+ - "7860:7860"
57
+ ```
58
+
59
+ **Option 3** - Build from source by [downloading the repository files](https://github.com/felladrin/MiniSearch/archive/refs/heads/main.zip) and running:
60
+
61
+ ```bash
62
+ docker compose -f docker-compose.production.yml up --build
63
+ ```
64
+
65
+ Once the container is running, open http://localhost:7860 in your browser and start searching!
66
+
67
+ ## Frequently asked questions
68
+
69
+ <details>
70
+ <summary>How do I search via the browser's address bar?</summary>
71
+ <p>
72
+ You can set MiniSearch as your browser's address-bar search engine using the pattern <code>http://localhost:7860/?q=%s</code>, in which your search term replaces <code>%s</code>.
73
+ </p>
74
+ </details>
75
+
76
+ <details>
77
+ <summary>How do I search via Raycast?</summary>
78
+ <p>
79
+ You can add <a href="https://ray.so/quicklinks/shared?quicklinks=%7B%22link%22:%22https:%5C/%5C/felladrin-minisearch.hf.space%5C/?q%3D%7BQuery%7D%22,%22name%22:%22MiniSearch%22%7D" target="_blank">this Quicklink</a> to Raycast, so typing your query will open MiniSearch with the search results. You can also edit it to point to your own domain.
80
+ </p>
81
+ <img width="744" alt="image" src="https://github.com/user-attachments/assets/521dca22-c77b-42de-8cc8-9feb06f9a97e">
82
+ </details>
83
+
84
+ <details>
85
+ <summary>Can I use custom models via OpenAI-Compatible API?</summary>
86
+ <p>
87
+ Yes! For this, open the Menu and change the "AI Processing Location" to <code>Remote server (API)</code>. Then configure the Base URL, and optionally set an API Key and a Model to use.
88
+ </p>
89
+ </details>
90
+
91
+ <details>
92
+ <summary>How do I restrict the access to my MiniSearch instance via password?</summary>
93
+ <p>
94
+ Create a <code>.env</code> file and set a value for <code>ACCESS_KEYS</code>. Then reset the MiniSearch docker container.
95
+ </p>
96
+ <p>
97
+ For example, if you to set the password to <code>PepperoniPizza</code>, then this is what you should add to your <code>.env</code>:<br/>
98
+ <code>ACCESS_KEYS="PepperoniPizza"</code>
99
+ </p>
100
+ <p>
101
+ You can find more examples in the <code>.env.example</code> file.
102
+ </p>
103
+ </details>
104
+
105
+ <details>
106
+ <summary>I want to serve MiniSearch to other users, allowing them to use my own OpenAI-Compatible API key, but without revealing it to them. Is it possible?</summary>
107
+ <p>Yes! In MiniSearch, we call this text-generation feature "Internal OpenAI-Compatible API". To use this it:</p>
108
+ <ol>
109
+ <li>Set up your OpenAI-Compatible API endpoint by configuring the following environment variables in your <code>.env</code> file:
110
+ <ul>
111
+ <li><code>INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL</code>: The base URL for your API</li>
112
+ <li><code>INTERNAL_OPENAI_COMPATIBLE_API_KEY</code>: Your API access key</li>
113
+ <li><code>INTERNAL_OPENAI_COMPATIBLE_API_MODEL</code>: The model to use</li>
114
+ <li><code>INTERNAL_OPENAI_COMPATIBLE_API_NAME</code>: The name to display in the UI</li>
115
+ </ul>
116
+ </li>
117
+ <li>Restart MiniSearch server.</li>
118
+ <li>In the MiniSearch menu, select the new option (named as per your <code>INTERNAL_OPENAI_COMPATIBLE_API_NAME</code> setting) from the "AI Processing Location" dropdown.</li>
119
+ </ol>
120
+ </details>
121
+
122
+ <details>
123
+ <summary>How can I contribute to the development of this tool?</summary>
124
+ <p>Fork this repository and clone it. Then, start the development server by running the following command:</p>
125
+ <p><code>docker compose up</code></p>
126
+ <p>Make your changes, push them to your fork, and open a pull request! All contributions are welcome!</p>
127
+ </details>
128
+
129
+ <details>
130
+ <summary>Why is MiniSearch built upon SearXNG's Docker Image and using a single image instead of composing it from multiple services?</summary>
131
+ <p>There are a few reasons for this:</p>
132
+ <ul>
133
+ <li>MiniSearch utilizes SearXNG as its meta-search engine.</li>
134
+ <li>Manual installation of SearXNG is not trivial, so we use the docker image they provide, which has everything set up.</li>
135
+ <li>SearXNG only provides a Docker Image based on Alpine Linux.</li>
136
+ <li>The user of the image needs to be customized in a specific way to run on HuggingFace Spaces, where MiniSearch's demo runs.</li>
137
+ <li>HuggingFace only accepts a single docker image. It doesn't run docker compose or multiple images, unfortunately.</li>
138
+ </ul>
139
+ </details>
biome.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "$schema": "https://biomejs.dev/schemas/1.9.4/schema.json",
3
+ "vcs": {
4
+ "enabled": false,
5
+ "clientKind": "git",
6
+ "useIgnoreFile": false
7
+ },
8
+ "files": {
9
+ "ignoreUnknown": false,
10
+ "ignore": []
11
+ },
12
+ "formatter": {
13
+ "enabled": true,
14
+ "indentStyle": "space"
15
+ },
16
+ "organizeImports": {
17
+ "enabled": true
18
+ },
19
+ "linter": {
20
+ "enabled": true,
21
+ "rules": {
22
+ "recommended": true
23
+ }
24
+ },
25
+ "javascript": {
26
+ "formatter": {
27
+ "quoteStyle": "double"
28
+ }
29
+ }
30
+ }
client/components/AiResponse/AiModelDownloadAllowanceContent.tsx ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Alert, Button, Group, Text } from "@mantine/core";
2
+ import { IconCheck, IconInfoCircle, IconX } from "@tabler/icons-react";
3
+ import { usePubSub } from "create-pubsub/react";
4
+ import { useState } from "react";
5
+ import { addLogEntry } from "../../modules/logEntries";
6
+ import { settingsPubSub } from "../../modules/pubSub";
7
+
8
+ export default function AiModelDownloadAllowanceContent() {
9
+ const [settings, setSettings] = usePubSub(settingsPubSub);
10
+ const [hasDeniedDownload, setDeniedDownload] = useState(false);
11
+
12
+ const handleAccept = () => {
13
+ setSettings({
14
+ ...settings,
15
+ allowAiModelDownload: true,
16
+ });
17
+ addLogEntry("User allowed the AI model download");
18
+ };
19
+
20
+ const handleDecline = () => {
21
+ setDeniedDownload(true);
22
+ addLogEntry("User denied the AI model download");
23
+ };
24
+
25
+ return hasDeniedDownload ? null : (
26
+ <Alert
27
+ variant="light"
28
+ color="blue"
29
+ title="Allow AI model download?"
30
+ icon={<IconInfoCircle />}
31
+ >
32
+ <Text size="sm" mb="md">
33
+ To obtain AI responses, a language model needs to be downloaded to your
34
+ browser. Enabling this option lets the app store it and load it
35
+ instantly on subsequent uses.
36
+ </Text>
37
+ <Text size="sm" mb="md">
38
+ Please note that the download size ranges from 100 MB to 4 GB, depending
39
+ on the model you select in the Menu, so it's best to avoid using mobile
40
+ data for this.
41
+ </Text>
42
+ <Group justify="flex-end" mt="md">
43
+ <Button
44
+ variant="subtle"
45
+ color="gray"
46
+ leftSection={<IconX size="1rem" />}
47
+ onClick={handleDecline}
48
+ size="xs"
49
+ >
50
+ Not now
51
+ </Button>
52
+ <Button
53
+ leftSection={<IconCheck size="1rem" />}
54
+ onClick={handleAccept}
55
+ size="xs"
56
+ >
57
+ Allow download
58
+ </Button>
59
+ </Group>
60
+ </Alert>
61
+ );
62
+ }
client/components/AiResponse/AiResponseContent.tsx ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import {
2
+ ActionIcon,
3
+ Alert,
4
+ Badge,
5
+ Box,
6
+ Card,
7
+ Group,
8
+ ScrollArea,
9
+ Text,
10
+ Tooltip,
11
+ } from "@mantine/core";
12
+ import {
13
+ IconArrowsMaximize,
14
+ IconArrowsMinimize,
15
+ IconHandStop,
16
+ IconInfoCircle,
17
+ IconRefresh,
18
+ IconVolume2,
19
+ } from "@tabler/icons-react";
20
+ import type { PublishFunction } from "create-pubsub";
21
+ import { usePubSub } from "create-pubsub/react";
22
+ import { type ReactNode, Suspense, lazy, useMemo, useState } from "react";
23
+ import { addLogEntry } from "../../modules/logEntries";
24
+ import { settingsPubSub } from "../../modules/pubSub";
25
+ import { searchAndRespond } from "../../modules/textGeneration";
26
+
27
+ const FormattedMarkdown = lazy(() => import("./FormattedMarkdown"));
28
+ const CopyIconButton = lazy(() => import("./CopyIconButton"));
29
+
30
+ export default function AiResponseContent({
31
+ textGenerationState,
32
+ response,
33
+ setTextGenerationState,
34
+ }: {
35
+ textGenerationState: string;
36
+ response: string;
37
+ setTextGenerationState: PublishFunction<
38
+ | "failed"
39
+ | "awaitingSearchResults"
40
+ | "preparingToGenerate"
41
+ | "idle"
42
+ | "loadingModel"
43
+ | "generating"
44
+ | "interrupted"
45
+ | "completed"
46
+ >;
47
+ }) {
48
+ const [settings, setSettings] = usePubSub(settingsPubSub);
49
+ const [isSpeaking, setIsSpeaking] = useState(false);
50
+
51
+ const ConditionalScrollArea = useMemo(
52
+ () =>
53
+ ({ children }: { children: ReactNode }) => {
54
+ return settings.enableAiResponseScrolling ? (
55
+ <ScrollArea.Autosize mah={300} type="auto" offsetScrollbars>
56
+ {children}
57
+ </ScrollArea.Autosize>
58
+ ) : (
59
+ <Box>{children}</Box>
60
+ );
61
+ },
62
+ [settings.enableAiResponseScrolling],
63
+ );
64
+
65
+ function speakResponse(text: string) {
66
+ if (isSpeaking) {
67
+ self.speechSynthesis.cancel();
68
+ setIsSpeaking(false);
69
+ return;
70
+ }
71
+
72
+ const prepareTextForSpeech = (textToClean: string) => {
73
+ const withoutLinks = textToClean.replace(/\[([^\]]+)\]\([^)]+\)/g, "");
74
+ const withoutMarkdown = withoutLinks.replace(/[#*`_~\[\]]/g, "");
75
+ return withoutMarkdown;
76
+ };
77
+
78
+ const utterance = new SpeechSynthesisUtterance(prepareTextForSpeech(text));
79
+
80
+ const voices = self.speechSynthesis.getVoices();
81
+
82
+ if (voices.length > 0 && settings.selectedVoiceId) {
83
+ const voice = voices.find(
84
+ (voice) => voice.voiceURI === settings.selectedVoiceId,
85
+ );
86
+
87
+ if (voice) {
88
+ utterance.voice = voice;
89
+ utterance.lang = voice.lang;
90
+ }
91
+ }
92
+
93
+ utterance.onerror = () => {
94
+ addLogEntry("Failed to speak response");
95
+ setIsSpeaking(false);
96
+ };
97
+
98
+ utterance.onend = () => setIsSpeaking(false);
99
+
100
+ setIsSpeaking(true);
101
+ self.speechSynthesis.speak(utterance);
102
+ }
103
+
104
+ return (
105
+ <Card withBorder shadow="sm" radius="md">
106
+ <Card.Section withBorder inheritPadding py="xs">
107
+ <Group justify="space-between">
108
+ <Group gap="xs" align="center">
109
+ <Text fw={500}>
110
+ {textGenerationState === "generating"
111
+ ? "Generating AI Response..."
112
+ : "AI Response"}
113
+ </Text>
114
+ {textGenerationState === "interrupted" && (
115
+ <Badge variant="light" color="yellow" size="xs">
116
+ Interrupted
117
+ </Badge>
118
+ )}
119
+ </Group>
120
+ <Group gap="xs" align="center">
121
+ {textGenerationState === "generating" ? (
122
+ <Tooltip label="Interrupt generation">
123
+ <ActionIcon
124
+ onClick={() => setTextGenerationState("interrupted")}
125
+ variant="subtle"
126
+ color="gray"
127
+ >
128
+ <IconHandStop size={16} />
129
+ </ActionIcon>
130
+ </Tooltip>
131
+ ) : (
132
+ <Tooltip label="Regenerate response">
133
+ <ActionIcon
134
+ onClick={() => searchAndRespond()}
135
+ variant="subtle"
136
+ color="gray"
137
+ >
138
+ <IconRefresh size={16} />
139
+ </ActionIcon>
140
+ </Tooltip>
141
+ )}
142
+ <Tooltip
143
+ label={isSpeaking ? "Stop speaking" : "Listen to response"}
144
+ >
145
+ <ActionIcon
146
+ onClick={() => speakResponse(response)}
147
+ variant="subtle"
148
+ color={isSpeaking ? "blue" : "gray"}
149
+ >
150
+ <IconVolume2 size={16} />
151
+ </ActionIcon>
152
+ </Tooltip>
153
+ {settings.enableAiResponseScrolling ? (
154
+ <Tooltip label="Show full response without scroll bar">
155
+ <ActionIcon
156
+ onClick={() => {
157
+ setSettings({
158
+ ...settings,
159
+ enableAiResponseScrolling: false,
160
+ });
161
+ }}
162
+ variant="subtle"
163
+ color="gray"
164
+ >
165
+ <IconArrowsMaximize size={16} />
166
+ </ActionIcon>
167
+ </Tooltip>
168
+ ) : (
169
+ <Tooltip label="Enable scroll bar">
170
+ <ActionIcon
171
+ onClick={() => {
172
+ setSettings({
173
+ ...settings,
174
+ enableAiResponseScrolling: true,
175
+ });
176
+ }}
177
+ variant="subtle"
178
+ color="gray"
179
+ >
180
+ <IconArrowsMinimize size={16} />
181
+ </ActionIcon>
182
+ </Tooltip>
183
+ )}
184
+ <Suspense>
185
+ <CopyIconButton value={response} tooltipLabel="Copy response" />
186
+ </Suspense>
187
+ </Group>
188
+ </Group>
189
+ </Card.Section>
190
+ <Card.Section withBorder>
191
+ <ConditionalScrollArea>
192
+ <Suspense>
193
+ <FormattedMarkdown>{response}</FormattedMarkdown>
194
+ </Suspense>
195
+ </ConditionalScrollArea>
196
+ {textGenerationState === "failed" && (
197
+ <Alert
198
+ variant="light"
199
+ color="yellow"
200
+ title="Failed to generate response"
201
+ icon={<IconInfoCircle />}
202
+ >
203
+ Could not generate response. Please try refreshing the page.
204
+ </Alert>
205
+ )}
206
+ </Card.Section>
207
+ </Card>
208
+ );
209
+ }
client/components/AiResponse/AiResponseSection.tsx ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { usePubSub } from "create-pubsub/react";
2
+ import { Suspense, lazy, useMemo } from "react";
3
+ import {
4
+ modelLoadingProgressPubSub,
5
+ modelSizeInMegabytesPubSub,
6
+ queryPubSub,
7
+ responsePubSub,
8
+ settingsPubSub,
9
+ textGenerationStatePubSub,
10
+ } from "../../modules/pubSub";
11
+
12
+ const AiResponseContent = lazy(() => import("./AiResponseContent"));
13
+ const PreparingContent = lazy(() => import("./PreparingContent"));
14
+ const LoadingModelContent = lazy(() => import("./LoadingModelContent"));
15
+ const ChatInterface = lazy(() => import("./ChatInterface"));
16
+ const AiModelDownloadAllowanceContent = lazy(
17
+ () => import("./AiModelDownloadAllowanceContent"),
18
+ );
19
+
20
+ export default function AiResponseSection() {
21
+ const [query] = usePubSub(queryPubSub);
22
+ const [response] = usePubSub(responsePubSub);
23
+ const [textGenerationState, setTextGenerationState] = usePubSub(
24
+ textGenerationStatePubSub,
25
+ );
26
+ const [modelLoadingProgress] = usePubSub(modelLoadingProgressPubSub);
27
+ const [settings] = usePubSub(settingsPubSub);
28
+ const [modelSizeInMegabytes] = usePubSub(modelSizeInMegabytesPubSub);
29
+
30
+ return useMemo(() => {
31
+ if (!settings.enableAiResponse || textGenerationState === "idle") {
32
+ return null;
33
+ }
34
+
35
+ const generatingStates = [
36
+ "generating",
37
+ "interrupted",
38
+ "completed",
39
+ "failed",
40
+ ];
41
+ if (generatingStates.includes(textGenerationState)) {
42
+ return (
43
+ <>
44
+ <Suspense>
45
+ <AiResponseContent
46
+ textGenerationState={textGenerationState}
47
+ response={response}
48
+ setTextGenerationState={setTextGenerationState}
49
+ />
50
+ </Suspense>
51
+ {textGenerationState === "completed" && (
52
+ <Suspense>
53
+ <ChatInterface initialQuery={query} initialResponse={response} />
54
+ </Suspense>
55
+ )}
56
+ </>
57
+ );
58
+ }
59
+
60
+ if (textGenerationState === "loadingModel") {
61
+ return (
62
+ <Suspense>
63
+ <LoadingModelContent
64
+ modelLoadingProgress={modelLoadingProgress}
65
+ modelSizeInMegabytes={modelSizeInMegabytes}
66
+ />
67
+ </Suspense>
68
+ );
69
+ }
70
+
71
+ if (textGenerationState === "preparingToGenerate") {
72
+ return (
73
+ <Suspense>
74
+ <PreparingContent textGenerationState={textGenerationState} />
75
+ </Suspense>
76
+ );
77
+ }
78
+
79
+ if (textGenerationState === "awaitingSearchResults") {
80
+ return (
81
+ <Suspense>
82
+ <PreparingContent textGenerationState={textGenerationState} />
83
+ </Suspense>
84
+ );
85
+ }
86
+
87
+ if (textGenerationState === "awaitingModelDownloadAllowance") {
88
+ return (
89
+ <Suspense>
90
+ <AiModelDownloadAllowanceContent />
91
+ </Suspense>
92
+ );
93
+ }
94
+
95
+ return null;
96
+ }, [
97
+ settings.enableAiResponse,
98
+ textGenerationState,
99
+ response,
100
+ query,
101
+ modelLoadingProgress,
102
+ modelSizeInMegabytes,
103
+ setTextGenerationState,
104
+ ]);
105
+ }
client/components/AiResponse/ChatInterface.tsx ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import {
2
+ Button,
3
+ Card,
4
+ Group,
5
+ Paper,
6
+ Stack,
7
+ Text,
8
+ Textarea,
9
+ } from "@mantine/core";
10
+ import { IconSend } from "@tabler/icons-react";
11
+ import { usePubSub } from "create-pubsub/react";
12
+ import type { ChatMessage } from "gpt-tokenizer/GptEncoding";
13
+ import {
14
+ type KeyboardEvent,
15
+ Suspense,
16
+ lazy,
17
+ useEffect,
18
+ useRef,
19
+ useState,
20
+ } from "react";
21
+ import { handleEnterKeyDown } from "../../modules/keyboard";
22
+ import { addLogEntry } from "../../modules/logEntries";
23
+ import { settingsPubSub } from "../../modules/pubSub";
24
+ import { generateChatResponse } from "../../modules/textGeneration";
25
+
26
+ const FormattedMarkdown = lazy(() => import("./FormattedMarkdown"));
27
+ const CopyIconButton = lazy(() => import("./CopyIconButton"));
28
+
29
+ export default function ChatInterface({
30
+ initialQuery,
31
+ initialResponse,
32
+ }: {
33
+ initialQuery: string;
34
+ initialResponse: string;
35
+ }) {
36
+ const [messages, setMessages] = useState<ChatMessage[]>([]);
37
+ const [input, setInput] = useState("");
38
+ const [isGenerating, setIsGenerating] = useState(false);
39
+ const [streamedResponse, setStreamedResponse] = useState("");
40
+ const latestResponseRef = useRef("");
41
+ const [settings] = usePubSub(settingsPubSub);
42
+
43
+ useEffect(() => {
44
+ setMessages([
45
+ { role: "user", content: initialQuery },
46
+ { role: "assistant", content: initialResponse },
47
+ ]);
48
+ }, [initialQuery, initialResponse]);
49
+
50
+ const handleSend = async () => {
51
+ if (input.trim() === "" || isGenerating) return;
52
+
53
+ const newMessages: ChatMessage[] = [
54
+ ...messages,
55
+ { role: "user", content: input },
56
+ ];
57
+ setMessages(newMessages);
58
+ setInput("");
59
+ setIsGenerating(true);
60
+ setStreamedResponse("");
61
+ latestResponseRef.current = "";
62
+
63
+ try {
64
+ addLogEntry("User sent a follow-up question");
65
+ await generateChatResponse(newMessages, (partialResponse) => {
66
+ setStreamedResponse(partialResponse);
67
+ latestResponseRef.current = partialResponse;
68
+ });
69
+ setMessages((prevMessages) => [
70
+ ...prevMessages,
71
+ { role: "assistant", content: latestResponseRef.current },
72
+ ]);
73
+ addLogEntry("AI responded to follow-up question");
74
+ } catch (error) {
75
+ addLogEntry(`Error generating chat response: ${error}`);
76
+ setMessages((prevMessages) => [
77
+ ...prevMessages,
78
+ {
79
+ role: "assistant",
80
+ content: "Sorry, I encountered an error while generating a response.",
81
+ },
82
+ ]);
83
+ } finally {
84
+ setIsGenerating(false);
85
+ setStreamedResponse("");
86
+ }
87
+ };
88
+
89
+ const handleKeyDown = (event: KeyboardEvent<HTMLTextAreaElement>) => {
90
+ handleEnterKeyDown(event, settings, handleSend);
91
+ };
92
+
93
+ const getChatContent = () => {
94
+ return messages
95
+ .slice(2)
96
+ .map(
97
+ (msg, index) =>
98
+ `${index + 1}. ${msg.role?.toUpperCase()}\n\n${msg.content}`,
99
+ )
100
+ .join("\n\n");
101
+ };
102
+
103
+ return (
104
+ <Card withBorder shadow="sm" radius="md">
105
+ <Card.Section withBorder inheritPadding py="xs">
106
+ <Group justify="space-between">
107
+ <Text fw={500}>Follow-up questions</Text>
108
+ {messages.length > 2 && (
109
+ <Suspense>
110
+ <CopyIconButton
111
+ value={getChatContent()}
112
+ tooltipLabel="Copy conversation"
113
+ />
114
+ </Suspense>
115
+ )}
116
+ </Group>
117
+ </Card.Section>
118
+ <Stack gap="md" pt="md">
119
+ {messages.slice(2).length > 0 && (
120
+ <Stack gap="md">
121
+ {messages.slice(2).map((message, index) => (
122
+ <Paper
123
+ key={`${message.role}-${index}`}
124
+ shadow="xs"
125
+ radius="xl"
126
+ p="sm"
127
+ maw="90%"
128
+ style={{
129
+ alignSelf:
130
+ message.role === "user" ? "flex-end" : "flex-start",
131
+ }}
132
+ >
133
+ <Suspense>
134
+ <FormattedMarkdown>{message.content}</FormattedMarkdown>
135
+ </Suspense>
136
+ </Paper>
137
+ ))}
138
+ {isGenerating && streamedResponse.length > 0 && (
139
+ <Paper
140
+ shadow="xs"
141
+ radius="xl"
142
+ p="sm"
143
+ maw="90%"
144
+ style={{ alignSelf: "flex-start" }}
145
+ >
146
+ <Suspense>
147
+ <FormattedMarkdown>{streamedResponse}</FormattedMarkdown>
148
+ </Suspense>
149
+ </Paper>
150
+ )}
151
+ </Stack>
152
+ )}
153
+ <Group align="flex-end" style={{ position: "relative" }}>
154
+ <Textarea
155
+ placeholder="Anything else you would like to know?"
156
+ value={input}
157
+ onChange={(event) => setInput(event.currentTarget.value)}
158
+ onKeyDown={handleKeyDown}
159
+ autosize
160
+ minRows={1}
161
+ maxRows={4}
162
+ style={{ flexGrow: 1, paddingRight: "50px" }}
163
+ disabled={isGenerating}
164
+ />
165
+ <Button
166
+ size="sm"
167
+ variant="default"
168
+ onClick={handleSend}
169
+ loading={isGenerating}
170
+ style={{
171
+ height: "100%",
172
+ position: "absolute",
173
+ right: 0,
174
+ top: 0,
175
+ bottom: 0,
176
+ borderTopLeftRadius: 0,
177
+ borderBottomLeftRadius: 0,
178
+ }}
179
+ >
180
+ <IconSend size={16} />
181
+ </Button>
182
+ </Group>
183
+ </Stack>
184
+ </Card>
185
+ );
186
+ }
client/components/AiResponse/CopyIconButton.tsx ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { ActionIcon, CopyButton, Tooltip } from "@mantine/core";
2
+ import { IconCheck, IconCopy } from "@tabler/icons-react";
3
+
4
+ interface CopyIconButtonProps {
5
+ value: string;
6
+ tooltipLabel?: string;
7
+ }
8
+
9
+ export default function CopyIconButton({
10
+ value,
11
+ tooltipLabel = "Copy",
12
+ }: CopyIconButtonProps) {
13
+ return (
14
+ <CopyButton value={value} timeout={2000}>
15
+ {({ copied, copy }) => (
16
+ <Tooltip
17
+ label={copied ? "Copied" : tooltipLabel}
18
+ withArrow
19
+ position="right"
20
+ >
21
+ <ActionIcon
22
+ color={copied ? "teal" : "gray"}
23
+ variant="subtle"
24
+ onClick={copy}
25
+ >
26
+ {copied ? <IconCheck size={16} /> : <IconCopy size={16} />}
27
+ </ActionIcon>
28
+ </Tooltip>
29
+ )}
30
+ </CopyButton>
31
+ );
32
+ }
client/components/AiResponse/FormattedMarkdown.tsx ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Box, TypographyStylesProvider, useMantineTheme } from "@mantine/core";
2
+ import React from "react";
3
+ import Markdown from "react-markdown";
4
+ import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";
5
+ import syntaxHighlighterStyle from "react-syntax-highlighter/dist/esm/styles/prism/one-dark";
6
+ import rehypeExternalLinks from "rehype-external-links";
7
+ import remarkGfm from "remark-gfm";
8
+ import CopyIconButton from "./CopyIconButton";
9
+
10
+ interface FormattedMarkdownProps {
11
+ children: string;
12
+ className?: string;
13
+ enableCopy?: boolean;
14
+ }
15
+
16
+ const FormattedMarkdown: React.FC<FormattedMarkdownProps> = ({
17
+ children,
18
+ className = "",
19
+ enableCopy = true,
20
+ }) => {
21
+ const theme = useMantineTheme();
22
+
23
+ if (!children) {
24
+ return null;
25
+ }
26
+
27
+ return (
28
+ <TypographyStylesProvider p="md">
29
+ <Box className={className}>
30
+ <Markdown
31
+ remarkPlugins={[remarkGfm]}
32
+ rehypePlugins={[
33
+ [
34
+ rehypeExternalLinks,
35
+ { target: "_blank", rel: ["nofollow", "noopener", "noreferrer"] },
36
+ ],
37
+ ]}
38
+ components={{
39
+ li(props) {
40
+ const { children } = props;
41
+ const processedChildren = React.Children.map(
42
+ children,
43
+ (child) => {
44
+ if (React.isValidElement(child) && child.type === "p") {
45
+ return (child.props as { children: React.ReactNode })
46
+ .children;
47
+ }
48
+ return child;
49
+ },
50
+ );
51
+ return <li>{processedChildren}</li>;
52
+ },
53
+ pre(props) {
54
+ return <>{props.children}</>;
55
+ },
56
+ code(props) {
57
+ const { children, className, node, ref, ...rest } = props;
58
+ void node;
59
+ const languageMatch = /language-(\w+)/.exec(className || "");
60
+ const codeContent = children?.toString().replace(/\n$/, "") ?? "";
61
+
62
+ if (languageMatch) {
63
+ return (
64
+ <Box
65
+ style={{
66
+ position: "relative",
67
+ marginBottom: theme.spacing.md,
68
+ }}
69
+ >
70
+ {enableCopy && (
71
+ <Box
72
+ style={{
73
+ position: "absolute",
74
+ top: theme.spacing.xs,
75
+ right: theme.spacing.xs,
76
+ zIndex: 2,
77
+ }}
78
+ >
79
+ <CopyIconButton value={codeContent} />
80
+ </Box>
81
+ )}
82
+ <SyntaxHighlighter
83
+ {...rest}
84
+ ref={ref as never}
85
+ language={languageMatch[1]}
86
+ style={syntaxHighlighterStyle}
87
+ >
88
+ {codeContent}
89
+ </SyntaxHighlighter>
90
+ </Box>
91
+ );
92
+ }
93
+
94
+ return (
95
+ <code
96
+ {...rest}
97
+ className={className}
98
+ style={{
99
+ backgroundColor: theme.colors.gray[8],
100
+ padding: "0.2em 0.4em",
101
+ borderRadius: theme.radius.sm,
102
+ fontSize: "0.9em",
103
+ }}
104
+ >
105
+ {children}
106
+ </code>
107
+ );
108
+ },
109
+ }}
110
+ >
111
+ {children}
112
+ </Markdown>
113
+ </Box>
114
+ </TypographyStylesProvider>
115
+ );
116
+ };
117
+
118
+ export default FormattedMarkdown;
client/components/AiResponse/LoadingModelContent.tsx ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Card, Group, Progress, Stack, Text } from "@mantine/core";
2
+
3
+ export default function LoadingModelContent({
4
+ modelLoadingProgress,
5
+ modelSizeInMegabytes,
6
+ }: {
7
+ modelLoadingProgress: number;
8
+ modelSizeInMegabytes: number;
9
+ }) {
10
+ const isLoadingStarting = modelLoadingProgress === 0;
11
+ const isLoadingComplete = modelLoadingProgress === 100;
12
+ const percent =
13
+ isLoadingComplete || isLoadingStarting ? 100 : modelLoadingProgress;
14
+ const strokeColor = percent === 100 ? "#52c41a" : "#3385ff";
15
+ const downloadedSize = (modelSizeInMegabytes * modelLoadingProgress) / 100;
16
+ const sizeText = `${downloadedSize.toFixed(0)} MB / ${modelSizeInMegabytes.toFixed(0)} MB`;
17
+
18
+ return (
19
+ <Card withBorder shadow="sm" radius="md">
20
+ <Card.Section withBorder inheritPadding py="xs">
21
+ <Text fw={500}>Loading AI...</Text>
22
+ </Card.Section>
23
+ <Card.Section withBorder inheritPadding py="md">
24
+ <Stack gap="xs">
25
+ <Progress color={strokeColor} value={percent} animated />
26
+ {!isLoadingStarting && (
27
+ <Group justify="space-between">
28
+ <Text size="sm" c="dimmed">
29
+ {sizeText}
30
+ </Text>
31
+ <Text size="sm" c="dimmed">
32
+ {percent.toFixed(1)}%
33
+ </Text>
34
+ </Group>
35
+ )}
36
+ </Stack>
37
+ </Card.Section>
38
+ </Card>
39
+ );
40
+ }
client/components/AiResponse/PreparingContent.tsx ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Card, Skeleton, Stack, Text } from "@mantine/core";
2
+
3
+ export default function PreparingContent({
4
+ textGenerationState,
5
+ }: {
6
+ textGenerationState: string;
7
+ }) {
8
+ const getStateText = () => {
9
+ if (textGenerationState === "awaitingSearchResults") {
10
+ return "Awaiting search results...";
11
+ }
12
+ if (textGenerationState === "preparingToGenerate") {
13
+ return "Preparing AI response...";
14
+ }
15
+ return null;
16
+ };
17
+
18
+ return (
19
+ <Card withBorder shadow="sm" radius="md">
20
+ <Card.Section withBorder inheritPadding py="xs">
21
+ <Text fw={500}>{getStateText()}</Text>
22
+ </Card.Section>
23
+ <Card.Section withBorder inheritPadding py="md">
24
+ <Stack>
25
+ <Skeleton height={8} radius="xl" />
26
+ <Skeleton height={8} width="70%" radius="xl" />
27
+ <Skeleton height={8} radius="xl" />
28
+ <Skeleton height={8} width="43%" radius="xl" />
29
+ </Stack>
30
+ </Card.Section>
31
+ </Card>
32
+ );
33
+ }
client/components/AiResponse/WebLlmModelSelect.tsx ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { type ComboboxItem, Select } from "@mantine/core";
2
+ import { prebuiltAppConfig } from "@mlc-ai/web-llm";
3
+ import { useCallback, useEffect, useState } from "react";
4
+ import { isF16Supported } from "../../modules/webGpu";
5
+
6
+ export default function WebLlmModelSelect({
7
+ value,
8
+ onChange,
9
+ }: {
10
+ value: string;
11
+ onChange: (value: string) => void;
12
+ }) {
13
+ const [webGpuModels] = useState<ComboboxItem[]>(() => {
14
+ const models = prebuiltAppConfig.model_list
15
+ .filter((model) => {
16
+ const isSmall = isSmallModel(model);
17
+ const suffix = getModelSuffix(isF16Supported, isSmall);
18
+ return model.model_id.endsWith(suffix);
19
+ })
20
+ .sort((a, b) => (a.vram_required_MB ?? 0) - (b.vram_required_MB ?? 0))
21
+ .map((model) => {
22
+ const modelSizeInMegabytes =
23
+ Math.round(model.vram_required_MB ?? 0) || "N/A";
24
+ const isSmall = isSmallModel(model);
25
+ const suffix = getModelSuffix(isF16Supported, isSmall);
26
+ const modelName = model.model_id.replace(suffix, "");
27
+
28
+ return {
29
+ label: `${modelSizeInMegabytes} MB β€’ ${modelName}`,
30
+ value: model.model_id,
31
+ };
32
+ });
33
+
34
+ return models;
35
+ });
36
+
37
+ useEffect(() => {
38
+ const isCurrentModelValid = webGpuModels.some(
39
+ (model) => model.value === value,
40
+ );
41
+
42
+ if (!isCurrentModelValid && webGpuModels.length > 0) {
43
+ onChange(webGpuModels[0].value);
44
+ }
45
+ }, [onChange, webGpuModels, value]);
46
+
47
+ const handleChange = useCallback(
48
+ (value: string | null) => {
49
+ if (value) onChange(value);
50
+ },
51
+ [onChange],
52
+ );
53
+
54
+ return (
55
+ <Select
56
+ value={value}
57
+ onChange={handleChange}
58
+ label="AI Model"
59
+ description="Select the model to use for AI responses."
60
+ data={webGpuModels}
61
+ allowDeselect={false}
62
+ searchable
63
+ />
64
+ );
65
+ }
66
+
67
+ type ModelConfig = (typeof prebuiltAppConfig.model_list)[number];
68
+
69
+ const smallModels = ["SmolLM2-135M", "SmolLM2-360M"] as const;
70
+
71
+ function isSmallModel(model: ModelConfig) {
72
+ return smallModels.some((smallModel) =>
73
+ model.model_id.startsWith(smallModel),
74
+ );
75
+ }
76
+
77
+ function getModelSuffix(isF16: boolean, isSmall: boolean) {
78
+ if (isSmall) return isF16 ? "-q0f16-MLC" : "-q0f32-MLC";
79
+
80
+ return isF16 ? "-q4f16_1-MLC" : "-q4f32_1-MLC";
81
+ }
client/components/AiResponse/WllamaModelSelect.tsx ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { type ComboboxItem, Select } from "@mantine/core";
2
+ import { useEffect, useState } from "react";
3
+ import { wllamaModels } from "../../modules/wllama";
4
+
5
+ export default function WllamaModelSelect({
6
+ value,
7
+ onChange,
8
+ }: {
9
+ value: string;
10
+ onChange: (value: string) => void;
11
+ }) {
12
+ const [wllamaModelOptions] = useState<ComboboxItem[]>(
13
+ Object.entries(wllamaModels)
14
+ .sort(([, a], [, b]) => a.fileSizeInMegabytes - b.fileSizeInMegabytes)
15
+ .map(([value, { label, fileSizeInMegabytes }]) => ({
16
+ label: `${fileSizeInMegabytes} MB β€’ ${label}`,
17
+ value,
18
+ })),
19
+ );
20
+
21
+ useEffect(() => {
22
+ const isCurrentModelValid = wllamaModelOptions.some(
23
+ (model) => model.value === value,
24
+ );
25
+
26
+ if (!isCurrentModelValid && wllamaModelOptions.length > 0) {
27
+ onChange(wllamaModelOptions[0].value);
28
+ }
29
+ }, [onChange, wllamaModelOptions, value]);
30
+
31
+ return (
32
+ <Select
33
+ value={value}
34
+ onChange={(value) => value && onChange(value)}
35
+ label="AI Model"
36
+ description="Select the model to use for AI responses."
37
+ data={wllamaModelOptions}
38
+ allowDeselect={false}
39
+ searchable
40
+ />
41
+ );
42
+ }
client/components/App/App.tsx ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { MantineProvider } from "@mantine/core";
2
+ import { Route, Switch } from "wouter";
3
+ import "@mantine/core/styles.css";
4
+ import { Notifications } from "@mantine/notifications";
5
+ import { usePubSub } from "create-pubsub/react";
6
+ import { lazy, useEffect, useState } from "react";
7
+ import { addLogEntry } from "../../modules/logEntries";
8
+ import { settingsPubSub } from "../../modules/pubSub";
9
+ import { defaultSettings } from "../../modules/settings";
10
+ import "@mantine/notifications/styles.css";
11
+ import { verifyStoredAccessKey } from "../../modules/accessKey";
12
+
13
+ const MainPage = lazy(() => import("../Pages/Main/MainPage"));
14
+ const AccessPage = lazy(() => import("../Pages/AccessPage"));
15
+
16
+ export function App() {
17
+ useInitializeSettings();
18
+ const { hasValidatedAccessKey, isCheckingStoredKey, setValidatedAccessKey } =
19
+ useAccessKeyValidation();
20
+
21
+ if (isCheckingStoredKey) {
22
+ return null;
23
+ }
24
+
25
+ return (
26
+ <MantineProvider defaultColorScheme="dark">
27
+ <Notifications />
28
+ <Switch>
29
+ <Route path="/">
30
+ {VITE_ACCESS_KEYS_ENABLED && !hasValidatedAccessKey ? (
31
+ <AccessPage onAccessKeyValid={() => setValidatedAccessKey(true)} />
32
+ ) : (
33
+ <MainPage />
34
+ )}
35
+ </Route>
36
+ </Switch>
37
+ </MantineProvider>
38
+ );
39
+ }
40
+
41
+ /**
42
+ * A custom React hook that initializes the application settings.
43
+ *
44
+ * @returns The initialized settings object.
45
+ *
46
+ * @remarks
47
+ * This hook uses the `usePubSub` hook to access and update the settings state.
48
+ * It initializes the settings by merging the default settings with any existing settings.
49
+ * The initialization is performed once when the component mounts.
50
+ */
51
+ function useInitializeSettings() {
52
+ const [settings, setSettings] = usePubSub(settingsPubSub);
53
+ const [settingsInitialized, setSettingsInitialized] = useState(false);
54
+
55
+ useEffect(() => {
56
+ if (settingsInitialized) return;
57
+
58
+ setSettings({ ...defaultSettings, ...settings });
59
+
60
+ setSettingsInitialized(true);
61
+
62
+ addLogEntry("Settings initialized");
63
+ }, [settings, setSettings, settingsInitialized]);
64
+
65
+ return settings;
66
+ }
67
+
68
+ /**
69
+ * A custom React hook that validates the stored access key on mount.
70
+ *
71
+ * @returns An object containing the validation state and loading state
72
+ */
73
+ function useAccessKeyValidation() {
74
+ const [hasValidatedAccessKey, setValidatedAccessKey] = useState(false);
75
+ const [isCheckingStoredKey, setCheckingStoredKey] = useState(true);
76
+
77
+ useEffect(() => {
78
+ async function checkStoredAccessKey() {
79
+ if (VITE_ACCESS_KEYS_ENABLED) {
80
+ const isValid = await verifyStoredAccessKey();
81
+ if (isValid) setValidatedAccessKey(true);
82
+ }
83
+ setCheckingStoredKey(false);
84
+ }
85
+
86
+ checkStoredAccessKey();
87
+ }, []);
88
+
89
+ return {
90
+ hasValidatedAccessKey,
91
+ isCheckingStoredKey,
92
+ setValidatedAccessKey,
93
+ };
94
+ }
client/components/Logs/LogsModal.tsx ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import {
2
+ Alert,
3
+ Button,
4
+ Center,
5
+ Group,
6
+ Modal,
7
+ Pagination,
8
+ Table,
9
+ } from "@mantine/core";
10
+ import { IconInfoCircle } from "@tabler/icons-react";
11
+ import { usePubSub } from "create-pubsub/react";
12
+ import { useCallback, useMemo, useState } from "react";
13
+ import { logEntriesPubSub } from "../../modules/logEntries";
14
+
15
+ export default function LogsModal({
16
+ opened,
17
+ onClose,
18
+ }: {
19
+ opened: boolean;
20
+ onClose: () => void;
21
+ }) {
22
+ const [logEntries] = usePubSub(logEntriesPubSub);
23
+
24
+ const [page, setPage] = useState(1);
25
+
26
+ const logEntriesPerPage = 5;
27
+
28
+ const logEntriesFromCurrentPage = useMemo(
29
+ () =>
30
+ logEntries.slice(
31
+ (page - 1) * logEntriesPerPage,
32
+ page * logEntriesPerPage,
33
+ ),
34
+ [logEntries, page],
35
+ );
36
+
37
+ const downloadLogsAsJson = useCallback(() => {
38
+ const jsonString = JSON.stringify(logEntries, null, 2);
39
+ const blob = new Blob([jsonString], { type: "application/json" });
40
+ const url = URL.createObjectURL(blob);
41
+ const link = document.createElement("a");
42
+ link.href = url;
43
+ link.download = "logs.json";
44
+ document.body.appendChild(link);
45
+ link.click();
46
+ document.body.removeChild(link);
47
+ URL.revokeObjectURL(url);
48
+ }, [logEntries]);
49
+
50
+ return (
51
+ <Modal opened={opened} onClose={onClose} size="xl" title="Logs">
52
+ <Alert variant="light" color="blue" icon={<IconInfoCircle />} mb="md">
53
+ <Group justify="space-between" align="center">
54
+ <span>
55
+ This information is stored solely in your browser for personal use.
56
+ It isn't sent automatically and is retained for debugging purposes
57
+ should you need to{" "}
58
+ <a
59
+ href="https://github.com/felladrin/MiniSearch/issues/new?labels=bug&template=bug_report.yml"
60
+ target="_blank"
61
+ rel="noopener noreferrer"
62
+ >
63
+ report a bug
64
+ </a>
65
+ .
66
+ </span>
67
+ <Button onClick={downloadLogsAsJson} size="xs" data-autofocus>
68
+ Download Logs
69
+ </Button>
70
+ </Group>
71
+ </Alert>
72
+ <Table striped highlightOnHover withTableBorder>
73
+ <Table.Thead>
74
+ <Table.Tr>
75
+ <Table.Th>Time</Table.Th>
76
+ <Table.Th>Message</Table.Th>
77
+ </Table.Tr>
78
+ </Table.Thead>
79
+ <Table.Tbody>
80
+ {logEntriesFromCurrentPage.map((entry, index) => (
81
+ <Table.Tr key={`${entry.timestamp}-${index}`}>
82
+ <Table.Td>
83
+ {new Date(entry.timestamp).toLocaleTimeString()}
84
+ </Table.Td>
85
+ <Table.Td>{entry.message}</Table.Td>
86
+ </Table.Tr>
87
+ ))}
88
+ </Table.Tbody>
89
+ </Table>
90
+ <Center>
91
+ <Pagination
92
+ total={Math.ceil(logEntries.length / logEntriesPerPage)}
93
+ value={page}
94
+ onChange={setPage}
95
+ size="sm"
96
+ mt="md"
97
+ />
98
+ </Center>
99
+ </Modal>
100
+ );
101
+ }
client/components/Logs/ShowLogsButton.tsx ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Button, Center, Loader, Stack, Text } from "@mantine/core";
2
+ import { Suspense, lazy, useState } from "react";
3
+ import { addLogEntry } from "../../modules/logEntries";
4
+
5
+ const LogsModal = lazy(() => import("./LogsModal"));
6
+
7
+ export default function ShowLogsButton() {
8
+ const [isLogsModalOpen, setLogsModalOpen] = useState(false);
9
+
10
+ const handleShowLogsButtonClick = () => {
11
+ addLogEntry("User opened the logs modal");
12
+ setLogsModalOpen(true);
13
+ };
14
+
15
+ const handleCloseLogsButtonClick = () => {
16
+ addLogEntry("User closed the logs modal");
17
+ setLogsModalOpen(false);
18
+ };
19
+
20
+ return (
21
+ <Stack gap="xs">
22
+ <Suspense
23
+ fallback={
24
+ <Center>
25
+ <Loader color="gray" type="bars" />
26
+ </Center>
27
+ }
28
+ >
29
+ <Button size="sm" onClick={handleShowLogsButtonClick} variant="default">
30
+ Show logs
31
+ </Button>
32
+ <Text size="xs" c="dimmed">
33
+ View session logs for debugging.
34
+ </Text>
35
+ <LogsModal
36
+ opened={isLogsModalOpen}
37
+ onClose={handleCloseLogsButtonClick}
38
+ />
39
+ </Suspense>
40
+ </Stack>
41
+ );
42
+ }
client/components/Pages/AccessPage.tsx ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Button, Container, Stack, TextInput, Title } from "@mantine/core";
2
+ import { type FormEvent, useState } from "react";
3
+ import { validateAccessKey } from "../../modules/accessKey";
4
+ import { addLogEntry } from "../../modules/logEntries";
5
+
6
+ export default function AccessPage({
7
+ onAccessKeyValid,
8
+ }: {
9
+ onAccessKeyValid: () => void;
10
+ }) {
11
+ const [accessKey, setAccessKey] = useState("");
12
+ const [error, setError] = useState("");
13
+
14
+ const handleSubmit = async (formEvent: FormEvent<HTMLFormElement>) => {
15
+ formEvent.preventDefault();
16
+ setError("");
17
+ try {
18
+ const isValid = await validateAccessKey(accessKey);
19
+ if (isValid) {
20
+ addLogEntry("Valid access key entered");
21
+ onAccessKeyValid();
22
+ } else {
23
+ setError("Invalid access key");
24
+ addLogEntry("Invalid access key attempt");
25
+ }
26
+ } catch (error) {
27
+ setError("Error validating access key");
28
+ addLogEntry(`Error validating access key: ${error}`);
29
+ }
30
+ };
31
+
32
+ return (
33
+ <Container size="xs">
34
+ <Stack p="lg" mih="100vh" justify="center">
35
+ <Title order={2} ta="center">
36
+ Access Restricted
37
+ </Title>
38
+ <form onSubmit={handleSubmit}>
39
+ <Stack gap="xs">
40
+ <TextInput
41
+ value={accessKey}
42
+ onChange={({ target }) => setAccessKey(target.value)}
43
+ placeholder="Enter your access key to continue"
44
+ required
45
+ autoFocus
46
+ error={error}
47
+ styles={{
48
+ input: {
49
+ textAlign: "center",
50
+ },
51
+ }}
52
+ />
53
+ <Button size="xs" type="submit">
54
+ Submit
55
+ </Button>
56
+ </Stack>
57
+ </form>
58
+ </Stack>
59
+ </Container>
60
+ );
61
+ }
client/components/Pages/Main/MainPage.tsx ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Center, Container, Loader, Stack } from "@mantine/core";
2
+ import { usePubSub } from "create-pubsub/react";
3
+ import { Suspense } from "react";
4
+ import { lazy } from "react";
5
+ import {
6
+ imageSearchStatePubSub,
7
+ queryPubSub,
8
+ textGenerationStatePubSub,
9
+ textSearchStatePubSub,
10
+ } from "../../../modules/pubSub";
11
+
12
+ const AiResponseSection = lazy(
13
+ () => import("../../AiResponse/AiResponseSection"),
14
+ );
15
+ const SearchResultsSection = lazy(
16
+ () => import("../../Search/Results/SearchResultsSection"),
17
+ );
18
+ const MenuButton = lazy(() => import("./Menu/MenuButton"));
19
+ const SearchForm = lazy(() => import("../../Search/Form/SearchForm"));
20
+
21
+ export default function MainPage() {
22
+ const [query, updateQuery] = usePubSub(queryPubSub);
23
+ const [textSearchState] = usePubSub(textSearchStatePubSub);
24
+ const [imageSearchState] = usePubSub(imageSearchStatePubSub);
25
+ const [textGenerationState] = usePubSub(textGenerationStatePubSub);
26
+
27
+ return (
28
+ <Container>
29
+ <Stack
30
+ py="md"
31
+ mih="100vh"
32
+ justify={query.length === 0 ? "center" : undefined}
33
+ >
34
+ <Suspense
35
+ fallback={
36
+ <Center>
37
+ <Loader type="bars" />
38
+ </Center>
39
+ }
40
+ >
41
+ <SearchForm
42
+ query={query}
43
+ updateQuery={updateQuery}
44
+ additionalButtons={<MenuButton />}
45
+ />
46
+ </Suspense>
47
+ {textGenerationState !== "idle" && (
48
+ <Suspense>
49
+ <AiResponseSection />
50
+ </Suspense>
51
+ )}
52
+ {(textSearchState !== "idle" || imageSearchState !== "idle") && (
53
+ <Suspense>
54
+ <SearchResultsSection />
55
+ </Suspense>
56
+ )}
57
+ </Stack>
58
+ </Container>
59
+ );
60
+ }
client/components/Pages/Main/Menu/AISettings/components/AIParameterSlider.tsx ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Slider, Stack, Text } from "@mantine/core";
2
+ import type { AIParameterSliderProps } from "../types";
3
+
4
+ export const AIParameterSlider = ({
5
+ label,
6
+ description,
7
+ defaultValue,
8
+ ...props
9
+ }: AIParameterSliderProps) => (
10
+ <Stack gap="xs" mb="md">
11
+ <Text size="sm">{label}</Text>
12
+ <Text size="xs" c="dimmed">
13
+ {description} Defaults to {defaultValue}.
14
+ </Text>
15
+ <Slider {...props} />
16
+ </Stack>
17
+ );
client/components/Pages/Main/Menu/AISettings/components/BrowserSettings.tsx ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { NumberInput, Skeleton, Switch } from "@mantine/core";
import type { UseFormReturnType } from "@mantine/form";
import { Suspense, lazy } from "react";
import type { defaultSettings } from "../../../../../../modules/settings";

// Model selectors are code-split: each pulls in a large inference backend.
const WebLlmModelSelect = lazy(
  () => import("../../../../../../components/AiResponse/WebLlmModelSelect"),
);
const WllamaModelSelect = lazy(
  () => import("../../../../../../components/AiResponse/WllamaModelSelect"),
);

interface BrowserSettingsProps {
  form: UseFormReturnType<typeof defaultSettings>;
  isWebGPUAvailable: boolean;
}

/**
 * Settings shown when inference runs inside the browser: a WebGPU toggle
 * (only when the device supports it) plus the model selector that matches
 * the active backend — WebLLM on GPU, Wllama (with a CPU-thread count
 * input) otherwise.
 */
export const BrowserSettings = ({
  form,
  isWebGPUAvailable,
}: BrowserSettingsProps) => {
  const useGpuBackend = isWebGPUAvailable && form.values.enableWebGpu;
  const modelSelectFallback = <Skeleton height={50} />;

  return (
    <>
      {isWebGPUAvailable && (
        <Switch
          label="WebGPU"
          labelPosition="left"
          description="Enable or disable WebGPU usage. When disabled, the app will use the CPU instead."
          {...form.getInputProps("enableWebGpu", { type: "checkbox" })}
        />
      )}

      {useGpuBackend ? (
        <Suspense fallback={modelSelectFallback}>
          <WebLlmModelSelect
            value={form.values.webLlmModelId}
            onChange={(value: string) =>
              form.setFieldValue("webLlmModelId", value)
            }
          />
        </Suspense>
      ) : (
        <>
          <Suspense fallback={modelSelectFallback}>
            <WllamaModelSelect
              value={form.values.wllamaModelId}
              onChange={(value: string) =>
                form.setFieldValue("wllamaModelId", value)
              }
            />
          </Suspense>
          <NumberInput
            min={1}
            label="CPU threads to use"
            description="Number of threads to use for the AI model. Lower values will use less CPU but may take longer to respond. A value that is too high may cause the app to hang."
            {...form.getInputProps("cpuThreads")}
          />
        </>
      )}
    </>
  );
};
client/components/Pages/Main/Menu/AISettings/components/HordeSettings.tsx ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Select, TextInput } from "@mantine/core";
2
+ import type { UseFormReturnType } from "@mantine/form";
3
+ import type { defaultSettings } from "../../../../../../modules/settings";
4
+ import { aiHordeDefaultApiKey } from "../../../../../../modules/textGenerationWithHorde";
5
+ import type { HordeUserInfo, ModelOption } from "../types";
6
+
7
+ interface HordeSettingsProps {
8
+ form: UseFormReturnType<typeof defaultSettings>;
9
+ hordeUserInfo: HordeUserInfo | null;
10
+ hordeModels: ModelOption[];
11
+ }
12
+
13
+ export const HordeSettings = ({
14
+ form,
15
+ hordeUserInfo,
16
+ hordeModels,
17
+ }: HordeSettingsProps) => (
18
+ <>
19
+ <TextInput
20
+ label="API Key"
21
+ description={
22
+ hordeUserInfo
23
+ ? `Logged in as ${
24
+ hordeUserInfo.username
25
+ } (${hordeUserInfo.kudos.toLocaleString()} kudos)`
26
+ : "By default, it's set to '0000000000', for anonymous access. However, anonymous accounts have the lowest priority when there's too many concurrent requests."
27
+ }
28
+ type="password"
29
+ {...form.getInputProps("hordeApiKey")}
30
+ />
31
+ {form.values.hordeApiKey.length > 0 &&
32
+ form.values.hordeApiKey !== aiHordeDefaultApiKey && (
33
+ <Select
34
+ label="Model"
35
+ description="Optional. When not selected, AI Horde will automatically choose an available model."
36
+ placeholder="Auto-selected"
37
+ data={hordeModels}
38
+ {...form.getInputProps("hordeModel")}
39
+ searchable
40
+ clearable
41
+ />
42
+ )}
43
+ </>
44
+ );
client/components/Pages/Main/Menu/AISettings/components/OpenAISettings.tsx ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { Group, Select, Text, TextInput } from "@mantine/core";
import type { UseFormReturnType } from "@mantine/form";
import { IconInfoCircle } from "@tabler/icons-react";
import type { defaultSettings } from "../../../../../../modules/settings";
import type { ModelOption } from "../types";

interface OpenAISettingsProps {
  form: UseFormReturnType<typeof defaultSettings>;
  openAiModels: ModelOption[];
  useTextInput: boolean;
}

/**
 * Settings for an OpenAI-compatible API backend: base URL (with a CORS
 * hint), optional API key, and a model field that degrades to free-text
 * entry when the server does not publish a model list.
 */
export const OpenAISettings = ({
  form,
  openAiModels,
  useTextInput,
}: OpenAISettingsProps) => {
  // Free-text entry when /models returned nothing; dropdown otherwise.
  const modelField = useTextInput ? (
    <TextInput
      {...form.getInputProps("openAiApiModel")}
      label="API Model"
      description="Enter the model identifier"
    />
  ) : (
    <Select
      {...form.getInputProps("openAiApiModel")}
      label="API Model"
      data={openAiModels}
      description="Optional, as some API servers don't provide a model list."
      allowDeselect={false}
      disabled={openAiModels.length === 0}
      searchable
    />
  );

  return (
    <>
      <TextInput
        {...form.getInputProps("openAiApiBaseUrl")}
        label="API Base URL"
        placeholder="http://localhost:11434/v1"
        required
      />
      <Group gap="xs">
        <IconInfoCircle size={16} />
        <Text size="xs" c="dimmed" flex={1}>
          You may need to add{" "}
          <em>{`${self.location.protocol}//${self.location.hostname}`}</em> to the
          list of allowed network origins in your API server settings.
        </Text>
      </Group>
      <TextInput
        {...form.getInputProps("openAiApiKey")}
        label="API Key"
        type="password"
        description="Optional, as local API servers usually do not require it."
      />
      {modelField}
    </>
  );
};
client/components/Pages/Main/Menu/AISettings/components/SystemPromptInput.tsx ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { Text, Textarea } from "@mantine/core";
import type { UseFormReturnType } from "@mantine/form";
import { defaultSettings } from "../../../../../../modules/settings";

interface SystemPromptInputProps {
  form: UseFormReturnType<typeof defaultSettings>;
}

/**
 * Textarea bound to the `systemPrompt` setting. The field description
 * documents example instructions and the `{{searchResults}}` /
 * `{{dateTime}}` template tags; when the prompt differs from the shipped
 * default, it also shows an inline "restore default" link.
 */
export const SystemPromptInput = ({ form }: SystemPromptInputProps) => {
  // True when the user has edited the prompt away from the default.
  const isUsingCustomInstructions =
    form.values.systemPrompt !== defaultSettings.systemPrompt;

  // Resets the prompt back to the shipped default text.
  const handleRestoreDefaultInstructions = () => {
    form.setFieldValue("systemPrompt", defaultSettings.systemPrompt);
  };

  return (
    <Textarea
      label="Instructions for AI"
      descriptionProps={{ component: "div" }}
      description={
        <>
          <span>
            Customize instructions for the AI to tailor its responses.
          </span>
          <br />
          <span>For example:</span>
          <ul>
            <li>
              Specify preferences
              <ul>
                <li>
                  <em>"use simple language"</em>
                </li>
                <li>
                  <em>"provide step-by-step explanations"</em>
                </li>
              </ul>
            </li>
            <li>
              Set a response style
              <ul>
                <li>
                  <em>"answer in a friendly tone"</em>
                </li>
                <li>
                  <em>"write your response in Spanish"</em>
                </li>
              </ul>
            </li>
            <li>
              Provide context about the audience
              <ul>
                <li>
                  <em>"you're talking to a high school student"</em>
                </li>
                <li>
                  <em>
                    "consider that your audience is composed of professionals in
                    the field of graphic design"
                  </em>
                </li>
              </ul>
            </li>
          </ul>
          <span>
            The special tag <em>{"{{searchResults}}"}</em> will be replaced with
            the search results, while <em>{"{{dateTime}}"}</em> will be replaced
            with the current date and time.
          </span>
          {isUsingCustomInstructions && (
            <>
              <br />
              <br />
              <span>
                Currently, you're using custom instructions. If you ever need to
                restore the default instructions, you can do so by clicking
              </span>{" "}
              <Text
                component="span"
                size="xs"
                c="blue"
                style={{ cursor: "pointer" }}
                onClick={handleRestoreDefaultInstructions}
              >
                here
              </Text>
              <span>.</span>
            </>
          )}
        </>
      }
      autosize
      maxRows={10}
      {...form.getInputProps("systemPrompt")}
    />
  );
};
client/components/Pages/Main/Menu/AISettings/hooks/useHordeModels.ts ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useEffect, useState } from "react";
2
+ import { addLogEntry } from "../../../../../../modules/logEntries";
3
+ import type { defaultSettings } from "../../../../../../modules/settings";
4
+ import { fetchHordeModels } from "../../../../../../modules/textGenerationWithHorde";
5
+ import type { ModelOption } from "../types";
6
+
7
+ type Settings = typeof defaultSettings;
8
+
9
+ export const useHordeModels = (settings: Settings) => {
10
+ const [hordeModels, setHordeModels] = useState<ModelOption[]>([]);
11
+
12
+ useEffect(() => {
13
+ async function fetchAvailableHordeModels() {
14
+ try {
15
+ const models = await fetchHordeModels();
16
+ const formattedModels = models.map((model) => ({
17
+ label: model.name,
18
+ value: model.name,
19
+ }));
20
+ setHordeModels(formattedModels);
21
+ } catch (error) {
22
+ const errorMessage =
23
+ error instanceof Error ? error.message : String(error);
24
+ addLogEntry(`Error fetching AI Horde models: ${errorMessage}`);
25
+ setHordeModels([]);
26
+ }
27
+ }
28
+
29
+ if (settings.inferenceType === "horde") {
30
+ fetchAvailableHordeModels();
31
+ }
32
+ }, [settings.inferenceType]);
33
+
34
+ return hordeModels;
35
+ };
client/components/Pages/Main/Menu/AISettings/hooks/useHordeUserInfo.ts ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useEffect, useState } from "react";
2
+ import { addLogEntry } from "../../../../../../modules/logEntries";
3
+ import type { defaultSettings } from "../../../../../../modules/settings";
4
+ import {
5
+ aiHordeDefaultApiKey,
6
+ fetchHordeUserInfo,
7
+ } from "../../../../../../modules/textGenerationWithHorde";
8
+ import type { HordeUserInfo } from "../types";
9
+
10
+ type Settings = typeof defaultSettings;
11
+
12
+ export const useHordeUserInfo = (settings: Settings) => {
13
+ const [hordeUserInfo, setHordeUserInfo] = useState<HordeUserInfo | null>(
14
+ null,
15
+ );
16
+
17
+ useEffect(() => {
18
+ async function fetchUserInfo() {
19
+ try {
20
+ if (
21
+ settings.hordeApiKey &&
22
+ settings.hordeApiKey !== aiHordeDefaultApiKey
23
+ ) {
24
+ const userInfo = await fetchHordeUserInfo(settings.hordeApiKey);
25
+ setHordeUserInfo(userInfo);
26
+ } else {
27
+ setHordeUserInfo(null);
28
+ }
29
+ } catch (error) {
30
+ const errorMessage =
31
+ error instanceof Error ? error.message : String(error);
32
+ addLogEntry(`Error fetching AI Horde user info: ${errorMessage}`);
33
+ setHordeUserInfo(null);
34
+ }
35
+ }
36
+
37
+ if (settings.inferenceType === "horde") {
38
+ fetchUserInfo();
39
+ }
40
+ }, [settings.inferenceType, settings.hordeApiKey]);
41
+
42
+ return hordeUserInfo;
43
+ };
client/components/Pages/Main/Menu/AISettings/hooks/useOpenAiModels.ts ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useEffect, useState } from "react";
2
+ import { addLogEntry } from "../../../../../../modules/logEntries";
3
+ import type { defaultSettings } from "../../../../../../modules/settings";
4
+ import type { ModelOption } from "../types";
5
+
6
+ type Settings = typeof defaultSettings;
7
+
8
+ export const useOpenAiModels = (settings: Settings) => {
9
+ const [openAiModels, setOpenAiModels] = useState<ModelOption[]>([]);
10
+ const [useTextInput, setUseTextInput] = useState(false);
11
+
12
+ useEffect(() => {
13
+ async function fetchOpenAiModels() {
14
+ try {
15
+ const response = await fetch(`${settings.openAiApiBaseUrl}/models`, {
16
+ headers: {
17
+ Authorization: `Bearer ${settings.openAiApiKey}`,
18
+ },
19
+ });
20
+
21
+ if (!response.ok) {
22
+ throw new Error(`Failed to fetch models: ${response.statusText}`);
23
+ }
24
+
25
+ const data = await response.json();
26
+ const models = data.data.map((model: { id: string }) => ({
27
+ label: model.id,
28
+ value: model.id,
29
+ }));
30
+
31
+ setOpenAiModels(models);
32
+ setUseTextInput(!Array.isArray(models) || models.length === 0);
33
+ } catch (error) {
34
+ const errorMessage =
35
+ error instanceof Error ? error.message : String(error);
36
+ addLogEntry(`Error fetching OpenAI models: ${errorMessage}`);
37
+ setOpenAiModels([]);
38
+ setUseTextInput(true);
39
+ }
40
+ }
41
+
42
+ if (settings.inferenceType === "openai" && settings.openAiApiBaseUrl) {
43
+ fetchOpenAiModels();
44
+ }
45
+ }, [
46
+ settings.inferenceType,
47
+ settings.openAiApiBaseUrl,
48
+ settings.openAiApiKey,
49
+ ]);
50
+
51
+ return { openAiModels, useTextInput };
52
+ };
client/components/Pages/Main/Menu/AISettings/index.tsx ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { Select, Stack, Switch } from "@mantine/core";
import { useForm } from "@mantine/form";
import { usePubSub } from "create-pubsub/react";
import { settingsPubSub } from "../../../../../modules/pubSub";
import {
  defaultSettings,
  inferenceTypes,
} from "../../../../../modules/settings";
import { isWebGPUAvailable } from "../../../../../modules/webGpu";
import { AIParameterSlider } from "./components/AIParameterSlider";
import { BrowserSettings } from "./components/BrowserSettings";
import { HordeSettings } from "./components/HordeSettings";
import { OpenAISettings } from "./components/OpenAISettings";
import { SystemPromptInput } from "./components/SystemPromptInput";
import { useHordeModels } from "./hooks/useHordeModels";
import { useHordeUserInfo } from "./hooks/useHordeUserInfo";
import { useOpenAiModels } from "./hooks/useOpenAiModels";
import { penaltySliderMarks } from "./types";

/**
 * Top-level AI settings form. Toggles AI responses on/off, selects the
 * inference backend (OpenAI-compatible, AI Horde, or in-browser) and renders
 * the matching backend sub-form, plus the system-prompt editor and the four
 * sampling-parameter sliders. Every form edit is published back to the
 * settings store via `onValuesChange`.
 */
export default function AISettingsForm() {
  const [settings, setSettings] = usePubSub(settingsPubSub);
  // Backend-specific data hooks; each no-ops unless its inference type is
  // the active one (see the guards inside the hooks).
  const { openAiModels, useTextInput } = useOpenAiModels(settings);
  const hordeModels = useHordeModels(settings);
  const hordeUserInfo = useHordeUserInfo(settings);

  // Form state mirrors the settings store on every change.
  const form = useForm({
    initialValues: settings,
    onValuesChange: setSettings,
  });

  return (
    <Stack gap="md">
      <Switch
        label="AI Response"
        {...form.getInputProps("enableAiResponse", { type: "checkbox" })}
        labelPosition="left"
        description="Enable or disable AI-generated responses to your queries. When disabled, you'll only see web search results."
      />

      {/* All remaining controls only make sense when AI responses are on. */}
      {form.values.enableAiResponse && (
        <>
          <Select
            {...form.getInputProps("inferenceType")}
            label="AI Processing Location"
            data={inferenceTypes}
            allowDeselect={false}
          />

          {form.values.inferenceType === "openai" && (
            <OpenAISettings
              form={form}
              openAiModels={openAiModels}
              useTextInput={useTextInput}
            />
          )}

          {form.values.inferenceType === "horde" && (
            <HordeSettings
              form={form}
              hordeUserInfo={hordeUserInfo}
              hordeModels={hordeModels}
            />
          )}

          {form.values.inferenceType === "browser" && (
            <BrowserSettings
              form={form}
              isWebGPUAvailable={isWebGPUAvailable}
            />
          )}

          <SystemPromptInput form={form} />

          {/* Sampling parameters; ranges follow the sliders' min/max below. */}
          <AIParameterSlider
            label="Temperature"
            description="Controls randomness in responses. Lower values make responses more focused and deterministic, while higher values make them more creative and diverse."
            defaultValue={defaultSettings.inferenceTemperature}
            {...form.getInputProps("inferenceTemperature")}
            min={0}
            max={2}
            step={0.01}
            marks={[
              { value: 0, label: "0" },
              { value: 1, label: "1" },
              { value: 2, label: "2" },
            ]}
          />

          <AIParameterSlider
            label="Top P"
            description="Controls diversity by limiting cumulative probability of tokens. Lower values make responses more focused, while higher values allow more variety."
            defaultValue={defaultSettings.inferenceTopP}
            {...form.getInputProps("inferenceTopP")}
            min={0}
            max={1}
            step={0.01}
            marks={Array.from({ length: 3 }, (_, index) => ({
              value: index / 2,
              label: (index / 2).toString(),
            }))}
          />

          <AIParameterSlider
            label="Frequency Penalty"
            description="Reduces repetition by penalizing tokens based on their frequency. Higher values decrease the likelihood of repeating the same information."
            defaultValue={defaultSettings.inferenceFrequencyPenalty}
            {...form.getInputProps("inferenceFrequencyPenalty")}
            min={-2.0}
            max={2.0}
            step={0.01}
            marks={penaltySliderMarks}
          />

          <AIParameterSlider
            label="Presence Penalty"
            description="Encourages new topics by penalizing tokens that have appeared. Higher values increase the model's likelihood to talk about new topics."
            defaultValue={defaultSettings.inferencePresencePenalty}
            {...form.getInputProps("inferencePresencePenalty")}
            min={-2.0}
            max={2.0}
            step={0.01}
            marks={penaltySliderMarks}
          />
        </>
      )}
    </Stack>
  );
}
client/components/Pages/Main/Menu/AISettings/types.ts ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import type { UseFormReturnType } from "@mantine/form";
import type { defaultSettings } from "../../../../../modules/settings";

/** A label/value pair usable as Mantine Select option data. */
export interface ModelOption {
  label: string;
  value: string;
}

/** AI Horde account details shown next to the API-key field. */
export interface HordeUserInfo {
  username: string;
  // Kudos balance of the account (Horde's priority currency).
  kudos: number;
}

/** Props shared by sub-forms that receive the settings form instance. */
export interface AISettingsFormProps {
  form: UseFormReturnType<typeof defaultSettings>;
}

/**
 * Props for AIParameterSlider: label, description, and the default value to
 * display; all other props are forwarded to the underlying Slider.
 */
export interface AIParameterSliderProps extends Record<string, unknown> {
  label: string;
  description: string;
  defaultValue: number;
}

// Marks shared by the frequency- and presence-penalty sliders (-2.0 to 2.0).
export const penaltySliderMarks = [
  { value: -2.0, label: "-2.0" },
  { value: 0.0, label: "0" },
  { value: 2.0, label: "2.0" },
];
client/components/Pages/Main/Menu/AISettingsForm.tsx ADDED
@@ -0,0 +1,492 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import {
2
+ Group,
3
+ NumberInput,
4
+ Select,
5
+ Skeleton,
6
+ Slider,
7
+ Stack,
8
+ Switch,
9
+ Text,
10
+ TextInput,
11
+ Textarea,
12
+ } from "@mantine/core";
13
+ import { useForm } from "@mantine/form";
14
+ import { IconInfoCircle } from "@tabler/icons-react";
15
+ import { usePubSub } from "create-pubsub/react";
16
+ import { Suspense, lazy, useEffect, useState } from "react";
17
+ import { addLogEntry } from "../../../../modules/logEntries";
18
+ import { settingsPubSub } from "../../../../modules/pubSub";
19
+ import { defaultSettings, inferenceTypes } from "../../../../modules/settings";
20
+ import {
21
+ aiHordeDefaultApiKey,
22
+ fetchHordeModels,
23
+ fetchHordeUserInfo,
24
+ } from "../../../../modules/textGenerationWithHorde";
25
+ import { isWebGPUAvailable } from "../../../../modules/webGpu";
26
+
27
// Model selectors are lazy-loaded so their inference-backend modules are
// only fetched when the in-browser mode actually renders them.
const WebLlmModelSelect = lazy(
  () => import("../../../AiResponse/WebLlmModelSelect"),
);
const WllamaModelSelect = lazy(
  () => import("../../../AiResponse/WllamaModelSelect"),
);

// Marks shared by the frequency- and presence-penalty sliders (-2.0 to 2.0).
const penaltySliderMarks = [
  { value: -2.0, label: "-2.0" },
  { value: 0.0, label: "0" },
  { value: 2.0, label: "2.0" },
];
39
+
40
/**
 * Monolithic AI settings form: toggles AI responses, picks the inference
 * backend (OpenAI-compatible API, AI Horde, or in-browser), and exposes the
 * system prompt plus sampling-parameter sliders. Form edits are mirrored
 * into the settings store via `onValuesChange`.
 */
export default function AISettingsForm() {
  const [settings, setSettings] = usePubSub(settingsPubSub);
  // AI Horde account info for the configured key; null when anonymous or
  // when the lookup failed.
  const [hordeUserInfo, setHordeUserInfo] = useState<{
    username: string;
    kudos: number;
  } | null>(null);
  // Options for the OpenAI-compatible model dropdown.
  const [openAiModels, setOpenAiModels] = useState<
    {
      label: string;
      value: string;
    }[]
  >([]);
  // Options for the AI Horde model dropdown.
  const [hordeModels, setHordeModels] = useState<
    {
      label: string;
      value: string;
    }[]
  >([]);
  // When true, the OpenAI model field renders as free text instead of a
  // dropdown (used when the server lists no models).
  const [useTextInput, setUseTextInput] = useState(false);

  // Every edit is immediately published back to the settings store.
  const form = useForm({
    initialValues: settings,
    onValuesChange: setSettings,
  });

  // Fetch the model list from the OpenAI-compatible server whenever that
  // backend is active or its base URL / API key changes.
  useEffect(() => {
    async function fetchOpenAiModels() {
      try {
        const response = await fetch(`${settings.openAiApiBaseUrl}/models`, {
          headers: {
            Authorization: `Bearer ${settings.openAiApiKey}`,
          },
        });

        if (!response.ok) {
          throw new Error(`Failed to fetch models: ${response.statusText}`);
        }

        const data = await response.json();
        const models = data.data.map((model: { id: string }) => ({
          label: model.id,
          value: model.id,
        }));

        setOpenAiModels(models);
        // Fall back to free-text model entry when no models were listed.
        const hasNoModelsDefined =
          !Array.isArray(models) || models.length === 0;
        setUseTextInput(hasNoModelsDefined);
      } catch (error) {
        const errorMessage =
          error instanceof Error ? error.message : String(error);
        addLogEntry(`Error fetching OpenAI models: ${errorMessage}`);
        setOpenAiModels([]);
        setUseTextInput(true);
      }
    }

    if (settings.inferenceType === "openai" && settings.openAiApiBaseUrl) {
      fetchOpenAiModels();
    }
  }, [
    settings.inferenceType,
    settings.openAiApiBaseUrl,
    settings.openAiApiKey,
  ]);

  // Fetch the available AI Horde models when that backend is active.
  useEffect(() => {
    async function fetchAvailableHordeModels() {
      try {
        const models = await fetchHordeModels();
        const formattedModels = models.map((model) => ({
          label: model.name,
          value: model.name,
        }));
        setHordeModels(formattedModels);
      } catch (error) {
        const errorMessage =
          error instanceof Error ? error.message : String(error);
        addLogEntry(`Error fetching AI Horde models: ${errorMessage}`);
        setHordeModels([]);
      }
    }

    if (settings.inferenceType === "horde") {
      fetchAvailableHordeModels();
    }
  }, [settings.inferenceType]);

  // Look up the AI Horde account (username/kudos) for non-anonymous keys.
  useEffect(() => {
    async function fetchUserInfo() {
      try {
        if (
          settings.hordeApiKey &&
          settings.hordeApiKey !== aiHordeDefaultApiKey
        ) {
          const userInfo = await fetchHordeUserInfo(settings.hordeApiKey);
          setHordeUserInfo(userInfo);
        } else {
          // Anonymous/default key: no account info to show.
          setHordeUserInfo(null);
        }
      } catch (error) {
        const errorMessage =
          error instanceof Error ? error.message : String(error);
        addLogEntry(`Error fetching AI Horde user info: ${errorMessage}`);
        setHordeUserInfo(null);
      }
    }

    if (settings.inferenceType === "horde") {
      fetchUserInfo();
    }
  }, [settings.inferenceType, settings.hordeApiKey]);

  // Keep the selected OpenAI model valid: if the fetched list does not
  // contain the current selection (or none is selected), pick the first.
  useEffect(() => {
    if (openAiModels.length > 0) {
      const hasNoModelSelected = !form.values.openAiApiModel;
      const isModelInvalid = !openAiModels.find(
        (model) => model.value === form.values.openAiApiModel,
      );

      if (hasNoModelSelected || isModelInvalid) {
        form.setFieldValue("openAiApiModel", openAiModels[0].value);
      }
    }
  }, [openAiModels, form.setFieldValue, form.values.openAiApiModel]);

  // True when the system prompt differs from the shipped default.
  const isUsingCustomInstructions =
    form.values.systemPrompt !== defaultSettings.systemPrompt;

  // Resets the system prompt back to the shipped default.
  const handleRestoreDefaultInstructions = () => {
    form.setFieldValue("systemPrompt", defaultSettings.systemPrompt);
  };

  return (
    <Stack gap="md">
      <Switch
        label="AI Response"
        {...form.getInputProps("enableAiResponse", {
          type: "checkbox",
        })}
        labelPosition="left"
        description="Enable or disable AI-generated responses to your queries. When disabled, you'll only see web search results."
      />

      {/* Remaining controls only apply while AI responses are enabled. */}
      {form.values.enableAiResponse && (
        <>
          <Stack gap="xs" mb="md">
            <Text size="sm">Search results to consider</Text>
            <Text size="xs" c="dimmed">
              Determines the number of search results to consider when
              generating AI responses. A higher value may enhance accuracy, but
              it will also increase response time.
            </Text>
            <Slider
              {...form.getInputProps("searchResultsToConsider")}
              min={0}
              max={6}
              marks={Array.from({ length: 7 }, (_, index) => ({
                value: index,
                label: index.toString(),
              }))}
            />
          </Stack>

          <Select
            {...form.getInputProps("inferenceType")}
            label="AI Processing Location"
            data={inferenceTypes}
            allowDeselect={false}
          />

          {/* OpenAI-compatible backend settings. */}
          {form.values.inferenceType === "openai" && (
            <>
              <TextInput
                {...form.getInputProps("openAiApiBaseUrl")}
                label="API Base URL"
                placeholder="http://localhost:11434/v1"
                required
              />
              <Group gap="xs">
                <IconInfoCircle size={16} />
                <Text size="xs" c="dimmed" flex={1}>
                  You may need to add{" "}
                  <em>
                    {`${self.location.protocol}//${self.location.hostname}`}
                  </em>{" "}
                  to the list of allowed network origins in your API server
                  settings.
                </Text>
              </Group>
              <TextInput
                {...form.getInputProps("openAiApiKey")}
                label="API Key"
                type="password"
                description="Optional, as local API servers usually do not require it."
              />
              {useTextInput ? (
                <TextInput
                  {...form.getInputProps("openAiApiModel")}
                  label="API Model"
                  description="Enter the model identifier"
                />
              ) : (
                <Select
                  {...form.getInputProps("openAiApiModel")}
                  label="API Model"
                  data={openAiModels}
                  description="Optional, as some API servers don't provide a model list."
                  allowDeselect={false}
                  disabled={openAiModels.length === 0}
                  searchable
                />
              )}
            </>
          )}

          {/* AI Horde backend settings. */}
          {form.values.inferenceType === "horde" && (
            <>
              <TextInput
                label="API Key"
                description={
                  hordeUserInfo
                    ? `Logged in as ${
                        hordeUserInfo.username
                      } (${hordeUserInfo.kudos.toLocaleString()} kudos)`
                    : "By default, it's set to '0000000000', for anonymous access. However, anonymous accounts have the lowest priority when there's too many concurrent requests."
                }
                type="password"
                {...form.getInputProps("hordeApiKey")}
              />
              {/* Model picker only for non-anonymous keys. */}
              {form.values.hordeApiKey.length > 0 &&
                form.values.hordeApiKey !== aiHordeDefaultApiKey && (
                  <Select
                    label="Model"
                    description="Optional. When not selected, AI Horde will automatically choose an available model."
                    placeholder="Auto-selected"
                    data={hordeModels}
                    {...form.getInputProps("hordeModel")}
                    searchable
                    clearable
                  />
                )}
            </>
          )}

          {/* In-browser backend settings. */}
          {form.values.inferenceType === "browser" && (
            <>
              {isWebGPUAvailable && (
                <Switch
                  label="WebGPU"
                  {...form.getInputProps("enableWebGpu", {
                    type: "checkbox",
                  })}
                  labelPosition="left"
                  description="Enable or disable WebGPU usage. When disabled, the app will use the CPU instead."
                />
              )}

              {isWebGPUAvailable && form.values.enableWebGpu ? (
                <Suspense fallback={<Skeleton height={50} />}>
                  <WebLlmModelSelect
                    value={form.values.webLlmModelId}
                    onChange={(value) =>
                      form.setFieldValue("webLlmModelId", value)
                    }
                  />
                </Suspense>
              ) : (
                <>
                  <Suspense fallback={<Skeleton height={50} />}>
                    <WllamaModelSelect
                      value={form.values.wllamaModelId}
                      onChange={(value) =>
                        form.setFieldValue("wllamaModelId", value)
                      }
                    />
                  </Suspense>
                  <NumberInput
                    label="CPU threads to use"
                    description={
                      <span>
                        Number of threads to use for the AI model. Lower values
                        will use less CPU but may take longer to respond. A
                        value that is too high may cause the app to hang.
                      </span>
                    }
                    min={1}
                    {...form.getInputProps("cpuThreads")}
                  />
                </>
              )}
            </>
          )}

          <Textarea
            label="Instructions for AI"
            descriptionProps={{ component: "div" }}
            description={
              <>
                <span>
                  Customize instructions for the AI to tailor its responses.
                </span>
                <br />
                <span>For example:</span>
                <ul>
                  <li>
                    Specify preferences
                    <ul>
                      <li>
                        <em>"use simple language"</em>
                      </li>
                      <li>
                        <em>"provide step-by-step explanations"</em>
                      </li>
                    </ul>
                  </li>
                  <li>
                    Set a response style
                    <ul>
                      <li>
                        <em>"answer in a friendly tone"</em>
                      </li>
                      <li>
                        <em>"write your response in Spanish"</em>
                      </li>
                    </ul>
                  </li>
                  <li>
                    Provide context about the audience
                    <ul>
                      <li>
                        <em>"you're talking to a high school student"</em>
                      </li>
                      <li>
                        <em>
                          "consider that your audience is composed of
                          professionals in the field of graphic design"
                        </em>
                      </li>
                    </ul>
                  </li>
                </ul>
                <span>
                  The special tag <em>{"{{searchResults}}"}</em> will be
                  replaced with the search results, while{" "}
                  <em>{"{{dateTime}}"}</em> will be replaced with the current
                  date and time.
                </span>
                {isUsingCustomInstructions && (
                  <>
                    <br />
                    <br />
                    <span>
                      Currently, you're using custom instructions. If you ever
                      need to restore the default instructions, you can do so by
                      clicking
                    </span>{" "}
                    <Text
                      component="span"
                      size="xs"
                      c="blue"
                      style={{ cursor: "pointer" }}
                      onClick={handleRestoreDefaultInstructions}
                    >
                      here
                    </Text>
                    <span>.</span>
                  </>
                )}
              </>
            }
            autosize
            maxRows={10}
            {...form.getInputProps("systemPrompt")}
          />

          <Stack gap="xs" mb="md">
            <Text size="sm">Temperature</Text>
            <Text size="xs" c="dimmed">
              Controls randomness in responses. Lower values make responses more
              focused and deterministic, while higher values make them more
              creative and diverse. Defaults to{" "}
              {defaultSettings.inferenceTemperature}.
            </Text>
            <Slider
              {...form.getInputProps("inferenceTemperature")}
              min={0}
              max={2}
              step={0.01}
              marks={[
                { value: 0, label: "0" },
                { value: 1, label: "1" },
                { value: 2, label: "2" },
              ]}
            />
          </Stack>

          <Stack gap="xs" mb="md">
            <Text size="sm">Top P</Text>
            <Text size="xs" c="dimmed">
              Controls diversity by limiting cumulative probability of tokens.
              Lower values make responses more focused, while higher values
              allow more variety. Defaults to {defaultSettings.inferenceTopP}.
            </Text>
            <Slider
              {...form.getInputProps("inferenceTopP")}
              min={0}
              max={1}
              step={0.01}
              marks={Array.from({ length: 3 }, (_, index) => ({
                value: index / 2,
                label: (index / 2).toString(),
              }))}
            />
          </Stack>

          <Stack gap="xs" mb="md">
            <Text size="sm">Frequency Penalty</Text>
            <Text size="xs" c="dimmed">
              Reduces repetition by penalizing tokens based on their frequency.
              Higher values decrease the likelihood of repeating the same
              information. Defaults to{" "}
              {defaultSettings.inferenceFrequencyPenalty}.
            </Text>
            <Slider
              {...form.getInputProps("inferenceFrequencyPenalty")}
              min={-2.0}
              max={2.0}
              step={0.01}
              marks={penaltySliderMarks}
            />
          </Stack>

          <Stack gap="xs" mb="md">
            <Text size="sm">Presence Penalty</Text>
            <Text size="xs" c="dimmed">
              Encourages new topics by penalizing tokens that have appeared.
              Higher values increase the model's likelihood to talk about new
              topics. Defaults to {defaultSettings.inferencePresencePenalty}.
            </Text>
            <Slider
              {...form.getInputProps("inferencePresencePenalty")}
              min={-2.0}
              max={2.0}
              step={0.01}
              marks={penaltySliderMarks}
            />
          </Stack>
        </>
      )}
    </Stack>
  );
}
client/components/Pages/Main/Menu/ActionsForm.tsx ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Stack } from "@mantine/core";
2
+ import { Suspense, lazy } from "react";
3
+
4
+ const ClearDataButton = lazy(() => import("./ClearDataButton"));
5
+ const ShowLogsButton = lazy(() => import("../../../Logs/ShowLogsButton"));
6
+
7
+ export default function ActionsForm() {
8
+ return (
9
+ <Stack gap="lg">
10
+ <Suspense>
11
+ <ClearDataButton />
12
+ </Suspense>
13
+ <Suspense>
14
+ <ShowLogsButton />
15
+ </Suspense>
16
+ </Stack>
17
+ );
18
+ }
client/components/Pages/Main/Menu/ClearDataButton.tsx ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Button, Stack, Text } from "@mantine/core";
2
+ import { useState } from "react";
3
+ import { useLocation } from "wouter";
4
+ import { addLogEntry } from "../../../../modules/logEntries";
5
+
6
+ export default function ClearDataButton() {
7
+ const [isClearingData, setIsClearingData] = useState(false);
8
+ const [hasClearedData, setHasClearedData] = useState(false);
9
+ const [, navigate] = useLocation();
10
+
11
+ const handleClearDataButtonClick = async () => {
12
+ const sureToDelete = self.confirm(
13
+ "Are you sure you want to reset the settings and delete all files in cache?",
14
+ );
15
+
16
+ if (!sureToDelete) return;
17
+
18
+ addLogEntry("User initiated data clearing");
19
+
20
+ setIsClearingData(true);
21
+
22
+ self.localStorage.clear();
23
+
24
+ for (const cacheName of await self.caches.keys()) {
25
+ await self.caches.delete(cacheName);
26
+ }
27
+
28
+ for (const databaseInfo of await self.indexedDB.databases()) {
29
+ if (databaseInfo.name) self.indexedDB.deleteDatabase(databaseInfo.name);
30
+ }
31
+
32
+ const { clearWllamaCache } = await import("../../../../modules/wllama");
33
+
34
+ await clearWllamaCache();
35
+
36
+ setIsClearingData(false);
37
+
38
+ setHasClearedData(true);
39
+
40
+ addLogEntry("All data cleared successfully");
41
+
42
+ navigate("/", { replace: true });
43
+
44
+ self.location.reload();
45
+ };
46
+
47
+ return (
48
+ <Stack gap="xs">
49
+ <Button
50
+ onClick={handleClearDataButtonClick}
51
+ variant="default"
52
+ loading={isClearingData}
53
+ loaderProps={{ type: "bars" }}
54
+ disabled={hasClearedData}
55
+ >
56
+ {hasClearedData ? "Data cleared" : "Clear all data"}
57
+ </Button>
58
+ <Text size="xs" c="dimmed">
59
+ Reset settings and delete all files in cache to free up space.
60
+ </Text>
61
+ </Stack>
62
+ );
63
+ }
client/components/Pages/Main/Menu/InterfaceSettingsForm.tsx ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import {
2
+ Stack,
3
+ Switch,
4
+ useComputedColorScheme,
5
+ useMantineColorScheme,
6
+ } from "@mantine/core";
7
+ import { useForm } from "@mantine/form";
8
+ import { usePubSub } from "create-pubsub/react";
9
+ import { settingsPubSub } from "../../../../modules/pubSub";
10
+
11
+ export default function InterfaceSettingsForm() {
12
+ const [settings, setSettings] = usePubSub(settingsPubSub);
13
+ const form = useForm({
14
+ initialValues: settings,
15
+ onValuesChange: setSettings,
16
+ });
17
+ const { setColorScheme } = useMantineColorScheme();
18
+ const computedColorScheme = useComputedColorScheme("light");
19
+
20
+ const toggleColorScheme = () => {
21
+ setColorScheme(computedColorScheme === "dark" ? "light" : "dark");
22
+ };
23
+
24
+ return (
25
+ <Stack gap="md">
26
+ <Switch
27
+ label="Dark Mode"
28
+ checked={computedColorScheme === "dark"}
29
+ onChange={toggleColorScheme}
30
+ labelPosition="left"
31
+ description="Enable or disable the dark color scheme."
32
+ styles={{ labelWrapper: { width: "100%" } }}
33
+ />
34
+
35
+ <Switch
36
+ {...form.getInputProps("enterToSubmit", {
37
+ type: "checkbox",
38
+ })}
39
+ label="Enter to Submit"
40
+ labelPosition="left"
41
+ description="Enable or disable using Enter key to submit the search query. When disabled, you'll need to click the Search button or use Shift+Enter to submit."
42
+ />
43
+ </Stack>
44
+ );
45
+ }
client/components/Pages/Main/Menu/MenuButton.tsx ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Button } from "@mantine/core";
2
+ import { Suspense, lazy, useCallback, useEffect, useState } from "react";
3
+ import { addLogEntry } from "../../../../modules/logEntries";
4
+
5
+ const MenuDrawer = lazy(() => import("./MenuDrawer"));
6
+
7
+ export default function MenuButton() {
8
+ const [isDrawerOpen, setDrawerOpen] = useState(false);
9
+ const [isDrawerLoaded, setDrawerLoaded] = useState(false);
10
+
11
+ const openDrawer = useCallback(() => {
12
+ setDrawerOpen(true);
13
+ addLogEntry("User opened the menu");
14
+ }, []);
15
+
16
+ const closeDrawer = useCallback(() => {
17
+ setDrawerOpen(false);
18
+ addLogEntry("User closed the menu");
19
+ }, []);
20
+
21
+ const handleDrawerLoad = useCallback(() => {
22
+ if (!isDrawerLoaded) {
23
+ addLogEntry("Menu drawer loaded");
24
+ setDrawerLoaded(true);
25
+ }
26
+ }, [isDrawerLoaded]);
27
+
28
+ return (
29
+ <>
30
+ <Button
31
+ size="xs"
32
+ onClick={openDrawer}
33
+ variant="default"
34
+ loading={isDrawerOpen && !isDrawerLoaded}
35
+ >
36
+ Menu
37
+ </Button>
38
+ {(isDrawerOpen || isDrawerLoaded) && (
39
+ <Suspense fallback={<SuspenseListener onUnload={handleDrawerLoad} />}>
40
+ <MenuDrawer onClose={closeDrawer} opened={isDrawerOpen} />
41
+ </Suspense>
42
+ )}
43
+ </>
44
+ );
45
+ }
46
+
47
+ function SuspenseListener({ onUnload }: { onUnload: () => void }) {
48
+ useEffect(() => {
49
+ return () => onUnload();
50
+ }, [onUnload]);
51
+
52
+ return null;
53
+ }
client/components/Pages/Main/Menu/MenuDrawer.tsx ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import {
2
+ Accordion,
3
+ ActionIcon,
4
+ Center,
5
+ Drawer,
6
+ type DrawerProps,
7
+ FocusTrap,
8
+ Group,
9
+ HoverCard,
10
+ Stack,
11
+ } from "@mantine/core";
12
+ import { IconBrandGithub } from "@tabler/icons-react";
13
+ import prettyMilliseconds from "pretty-ms";
14
+ import { Suspense, lazy } from "react";
15
+ import { repository } from "../../../../../package.json";
16
+ import { appName, appVersion } from "../../../../modules/appInfo";
17
+ import { addLogEntry } from "../../../../modules/logEntries";
18
+
19
+ const AISettingsForm = lazy(() => import("./AISettingsForm"));
20
+ const SearchSettingsForm = lazy(() => import("./SearchSettingsForm"));
21
+ const InterfaceSettingsForm = lazy(() => import("./InterfaceSettingsForm"));
22
+ const ActionsForm = lazy(() => import("./ActionsForm"));
23
+ const VoiceSettingsForm = lazy(() => import("./VoiceSettingsForm"));
24
+
25
+ export default function MenuDrawer(drawerProps: DrawerProps) {
26
+ return (
27
+ <Drawer
28
+ {...drawerProps}
29
+ position="right"
30
+ size="md"
31
+ title={
32
+ <Group gap="xs">
33
+ <ActionIcon
34
+ variant="subtle"
35
+ component="a"
36
+ color="var(--mantine-color-text)"
37
+ href={repository.url}
38
+ target="_blank"
39
+ onClick={() => addLogEntry("User clicked the GitHub link")}
40
+ >
41
+ <IconBrandGithub size={16} />
42
+ </ActionIcon>
43
+ <HoverCard shadow="md" withArrow>
44
+ <HoverCard.Target>
45
+ <Center>{appName}</Center>
46
+ </HoverCard.Target>
47
+ <HoverCard.Dropdown>
48
+ <Stack gap="xs">
49
+ <Center>{appName}</Center>
50
+ <Center>{`v${appVersion}`}</Center>
51
+ <Center>
52
+ Released{" "}
53
+ {prettyMilliseconds(
54
+ new Date().getTime() -
55
+ new Date(VITE_BUILD_DATE_TIME).getTime(),
56
+ {
57
+ compact: true,
58
+ verbose: true,
59
+ },
60
+ )}{" "}
61
+ ago
62
+ </Center>
63
+ </Stack>
64
+ </HoverCard.Dropdown>
65
+ </HoverCard>
66
+ </Group>
67
+ }
68
+ >
69
+ <FocusTrap.InitialFocus />
70
+ <Drawer.Body>
71
+ <Accordion variant="separated" multiple>
72
+ <Accordion.Item value="aiSettings">
73
+ <Accordion.Control>AI Settings</Accordion.Control>
74
+ <Accordion.Panel>
75
+ <Suspense>
76
+ <AISettingsForm />
77
+ </Suspense>
78
+ </Accordion.Panel>
79
+ </Accordion.Item>
80
+ <Accordion.Item value="searchSettings">
81
+ <Accordion.Control>Search Settings</Accordion.Control>
82
+ <Accordion.Panel>
83
+ <Suspense>
84
+ <SearchSettingsForm />
85
+ </Suspense>
86
+ </Accordion.Panel>
87
+ </Accordion.Item>
88
+ <Accordion.Item value="interfaceSettings">
89
+ <Accordion.Control>Interface Settings</Accordion.Control>
90
+ <Accordion.Panel>
91
+ <Suspense>
92
+ <InterfaceSettingsForm />
93
+ </Suspense>
94
+ </Accordion.Panel>
95
+ </Accordion.Item>
96
+ <Accordion.Item value="voiceSettings">
97
+ <Accordion.Control>Voice Settings</Accordion.Control>
98
+ <Accordion.Panel>
99
+ <Suspense>
100
+ <VoiceSettingsForm />
101
+ </Suspense>
102
+ </Accordion.Panel>
103
+ </Accordion.Item>
104
+ <Accordion.Item value="actions">
105
+ <Accordion.Control>Actions</Accordion.Control>
106
+ <Accordion.Panel>
107
+ <Suspense>
108
+ <ActionsForm />
109
+ </Suspense>
110
+ </Accordion.Panel>
111
+ </Accordion.Item>
112
+ </Accordion>
113
+ </Drawer.Body>
114
+ </Drawer>
115
+ );
116
+ }
client/components/Pages/Main/Menu/SearchSettingsForm.tsx ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Slider, Stack, Switch, Text } from "@mantine/core";
2
+ import { useForm } from "@mantine/form";
3
+ import { usePubSub } from "create-pubsub/react";
4
+ import { settingsPubSub } from "../../../../modules/pubSub";
5
+
6
+ export default function SearchSettingsForm() {
7
+ const [settings, setSettings] = usePubSub(settingsPubSub);
8
+ const form = useForm({
9
+ initialValues: settings,
10
+ onValuesChange: setSettings,
11
+ });
12
+
13
+ return (
14
+ <Stack gap="md">
15
+ <Stack gap="xs" mb="md">
16
+ <Text size="sm">Search Results Limit</Text>
17
+ <Text size="xs" c="dimmed">
18
+ Maximum number of search results to fetch. A higher value provides
19
+ more results but may increase search time.
20
+ </Text>
21
+ <Slider
22
+ {...form.getInputProps("searchResultsLimit")}
23
+ min={5}
24
+ max={30}
25
+ step={5}
26
+ marks={[5, 10, 15, 20, 25, 30].map((value) => ({
27
+ value,
28
+ label: value.toString(),
29
+ }))}
30
+ />
31
+ </Stack>
32
+
33
+ <Switch
34
+ {...form.getInputProps("enableTextSearch", {
35
+ type: "checkbox",
36
+ })}
37
+ label="Text Search"
38
+ labelPosition="left"
39
+ description="Enable or disable text search results. When enabled, relevant web pages will be displayed in the search results."
40
+ />
41
+
42
+ <Switch
43
+ {...form.getInputProps("enableImageSearch", {
44
+ type: "checkbox",
45
+ })}
46
+ label="Image Search"
47
+ labelPosition="left"
48
+ description="Enable or disable image search results. When enabled, relevant images will be displayed alongside web search results."
49
+ />
50
+ </Stack>
51
+ );
52
+ }
client/components/Pages/Main/Menu/VoiceSettingsForm.tsx ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { Select, Stack, Text } from "@mantine/core";
import { useForm } from "@mantine/form";
import getUnicodeFlagIcon from "country-flag-icons/unicode";
import { usePubSub } from "create-pubsub/react";
import { useCallback, useEffect, useState } from "react";
import { settingsPubSub } from "../../../../modules/pubSub";

/**
 * Voice settings: lets the user pick the speech-synthesis voice used to
 * read AI responses aloud. The list is populated from
 * `speechSynthesis.getVoices()` and refreshed when the browser fires
 * `voiceschanged` (voices often load asynchronously).
 */
export default function VoiceSettingsForm() {
  const [settings, setSettings] = usePubSub(settingsPubSub);
  const [voices, setVoices] = useState<{ value: string; label: string }[]>([]);

  // Maps a BCP-47 language tag (e.g. "en-US") to a flag emoji;
  // falls back to a globe when the region part is missing or malformed.
  const getCountryFlag = useCallback((langCode: string) => {
    try {
      const country = langCode.split("-")[1];

      if (country.length !== 2) throw new Error("Invalid country code");

      return getUnicodeFlagIcon(country);
    } catch {
      return "🌐";
    }
  }, []);

  const form = useForm({
    initialValues: settings,
    onValuesChange: setSettings,
  });

  useEffect(() => {
    const updateVoices = () => {
      const availableVoices = self.speechSynthesis.getVoices();
      // De-duplicate by voiceURI: some browsers report the same voice twice.
      const uniqueVoices = Array.from(
        new Map(
          availableVoices.map((voice) => [voice.voiceURI, voice]),
        ).values(),
      );
      const voiceOptions = uniqueVoices
        .sort((a, b) => a.lang.localeCompare(b.lang))
        .map((voice) => ({
          value: voice.voiceURI,
          label: `${getCountryFlag(voice.lang)} ${voice.name} • ${voice.lang}`,
        }));
      setVoices(voiceOptions);
    };

    updateVoices();

    // Use addEventListener rather than assigning `onvoiceschanged`:
    // the property assignment clobbers any other listener, and setting it
    // to null on cleanup would silently disable listeners registered by
    // other components.
    self.speechSynthesis.addEventListener("voiceschanged", updateVoices);

    return () => {
      self.speechSynthesis.removeEventListener("voiceschanged", updateVoices);
    };
  }, [getCountryFlag]);

  return (
    <Stack gap="xs">
      <Text size="sm">Voice Selection</Text>
      <Text size="xs" c="dimmed">
        Choose the voice to use when reading AI responses aloud.
      </Text>
      <Select
        {...form.getInputProps("selectedVoiceId")}
        data={voices}
        searchable
        nothingFoundMessage="No voices found"
        placeholder="Auto-detected"
        allowDeselect={true}
        clearable
      />
    </Stack>
  );
}
client/components/Search/Form/SearchForm.tsx ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { Button, Group, Stack, Textarea } from "@mantine/core";
import { usePubSub } from "create-pubsub/react";
import {
  type ChangeEvent,
  type KeyboardEvent,
  type ReactNode,
  useCallback,
  useEffect,
  useRef,
  useState,
} from "react";
import { useLocation } from "wouter";
import { handleEnterKeyDown } from "../../../modules/keyboard";
import { addLogEntry } from "../../../modules/logEntries";
import { postMessageToParentWindow } from "../../../modules/parentWindow";
import { settingsPubSub } from "../../../modules/pubSub";
import { getRandomQuerySuggestion } from "../../../modules/querySuggestions";
import { sleepUntilIdle } from "../../../modules/sleep";
import { searchAndRespond } from "../../../modules/textGeneration";

/**
 * Search input form: a textarea with a rotating placeholder suggestion,
 * a Clear button (shown only when there is text), and a Search button.
 *
 * Props:
 * - query: the current query (controls the initial textarea value).
 * - updateQuery: callback to propagate the submitted query to the parent.
 * - additionalButtons: optional extra buttons rendered next to Search.
 *
 * On submit it updates the URL query string, notifies a possible parent
 * window (iframe embedding), and kicks off search + AI response.
 */
export default function SearchForm({
  query,
  updateQuery,
  additionalButtons,
}: {
  query: string;
  updateQuery: (query: string) => void;
  additionalButtons?: ReactNode;
}) {
  const textAreaRef = useRef<HTMLTextAreaElement>(null);
  const [textAreaValue, setTextAreaValue] = useState(query);
  const defaultSuggestedQuery = "Anything you need!";
  const [suggestedQuery, setSuggestedQuery] = useState(defaultSuggestedQuery);
  const [, navigate] = useLocation();
  const [settings] = usePubSub(settingsPubSub);

  // On mount: wait for the browser to be idle, then run any pending search
  // (e.g. a query already present in the URL).
  const handleMount = useCallback(async () => {
    await sleepUntilIdle();
    searchAndRespond();
  }, []);

  // Replace the static default placeholder with a random suggestion.
  const handleInitialSuggestion = useCallback(async () => {
    const suggestion = await getRandomQuerySuggestion();
    setSuggestedQuery(suggestion);
  }, []);

  useEffect(() => {
    handleMount();
    handleInitialSuggestion();
  }, [handleMount, handleInitialSuggestion]);

  const handleInputChange = async (event: ChangeEvent<HTMLTextAreaElement>) => {
    const text = event.target.value;

    setTextAreaValue(text);

    // When the field is emptied, rotate in a fresh placeholder suggestion.
    if (text.length === 0) {
      setSuggestedQuery(await getRandomQuerySuggestion());
    }
  };

  const handleClearButtonClick = async () => {
    setSuggestedQuery(await getRandomQuerySuggestion());
    setTextAreaValue("");
    textAreaRef.current?.focus();
    addLogEntry("User cleaned the search query field");
  };

  // Submits the current text (or the placeholder suggestion when empty).
  // NOTE: the order of side effects matters — parent-window postMessage,
  // then URL navigation, then parent-state update, then the search itself.
  const startSearching = useCallback(() => {
    const queryToEncode =
      textAreaValue.trim().length >= 1 ? textAreaValue : suggestedQuery;

    setTextAreaValue(queryToEncode);

    const queryString = `q=${encodeURIComponent(queryToEncode)}`;

    // Keep an embedding parent window (if any) in sync with the query.
    postMessageToParentWindow({ queryString, hash: "" });

    navigate(`/?${queryString}`, { replace: true });

    updateQuery(queryToEncode);

    searchAndRespond();

    addLogEntry(
      `User submitted a search with ${queryToEncode.length} characters length`,
    );
  }, [textAreaValue, suggestedQuery, updateQuery, navigate]);

  const handleSubmit = (event: { preventDefault: () => void }) => {
    event.preventDefault();
    startSearching();
  };

  // Enter-to-submit behavior is configurable; handleEnterKeyDown consults
  // the user's settings before deciding whether to submit.
  const handleKeyDown = (event: KeyboardEvent<HTMLTextAreaElement>) => {
    handleEnterKeyDown(event, settings, () => handleSubmit(event));
  };

  return (
    <form onSubmit={handleSubmit} style={{ width: "100%" }}>
      <Stack gap="xs">
        <Textarea
          value={textAreaValue}
          placeholder={suggestedQuery}
          ref={textAreaRef}
          onKeyDown={handleKeyDown}
          onChange={handleInputChange}
          autosize
          minRows={1}
          maxRows={8}
          autoFocus
        />
        <Group gap="xs">
          {textAreaValue.length >= 1 ? (
            <Button
              size="xs"
              onClick={handleClearButtonClick}
              variant="default"
            >
              Clear
            </Button>
          ) : null}
          <Button size="xs" type="submit" variant="default" flex={1}>
            Search
          </Button>
          {additionalButtons}
        </Group>
      </Stack>
    </form>
  );
}
client/components/Search/Results/Graphical/ImageResultsList.tsx ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Carousel } from "@mantine/carousel";
2
+ import { Button, Group, Stack, Text, Transition, rem } from "@mantine/core";
3
+ import { useEffect, useState } from "react";
4
+ import type { ImageSearchResult } from "../../../../modules/types";
5
+ import "@mantine/carousel/styles.css";
6
+ import Lightbox from "yet-another-react-lightbox";
7
+ import Captions from "yet-another-react-lightbox/plugins/captions";
8
+ import "yet-another-react-lightbox/styles.css";
9
+ import "yet-another-react-lightbox/plugins/captions.css";
10
+ import { addLogEntry } from "../../../../modules/logEntries";
11
+ import { getHostname } from "../../../../modules/stringFormatters";
12
+
13
+ export default function ImageResultsList({
14
+ imageResults,
15
+ }: {
16
+ imageResults: ImageSearchResult[];
17
+ }) {
18
+ const [isLightboxOpen, setLightboxOpen] = useState(false);
19
+ const [lightboxIndex, setLightboxIndex] = useState(0);
20
+ const [canStartTransition, setCanStartTransition] = useState(false);
21
+
22
+ useEffect(() => {
23
+ setCanStartTransition(true);
24
+ }, []);
25
+
26
+ const handleImageClick = (index: number) => {
27
+ setLightboxIndex(index);
28
+ setLightboxOpen(true);
29
+ };
30
+
31
+ const imageStyle = {
32
+ objectFit: "cover",
33
+ height: rem(180),
34
+ width: rem(240),
35
+ borderRadius: rem(4),
36
+ border: `${rem(2)} solid var(--mantine-color-default-border)`,
37
+ cursor: "zoom-in",
38
+ } as const;
39
+
40
+ return (
41
+ <>
42
+ <Carousel slideSize="0" slideGap="xs" align="start" dragFree loop>
43
+ {imageResults.map(([title, sourceUrl, thumbnailUrl], index) => (
44
+ <Transition
45
+ key={`${title}-${sourceUrl}-${thumbnailUrl}`}
46
+ mounted={canStartTransition}
47
+ transition="fade"
48
+ timingFunction="ease"
49
+ enterDelay={index * 250}
50
+ duration={1500}
51
+ >
52
+ {(styles) => (
53
+ <Carousel.Slide style={styles}>
54
+ <img
55
+ alt={title}
56
+ src={thumbnailUrl}
57
+ loading="lazy"
58
+ onClick={() => handleImageClick(index)}
59
+ onKeyDown={(e) => {
60
+ if (e.key === "Enter") {
61
+ handleImageClick(index);
62
+ }
63
+ }}
64
+ style={imageStyle}
65
+ />
66
+ </Carousel.Slide>
67
+ )}
68
+ </Transition>
69
+ ))}
70
+ </Carousel>
71
+ <Lightbox
72
+ open={isLightboxOpen}
73
+ close={() => setLightboxOpen(false)}
74
+ plugins={[Captions]}
75
+ index={lightboxIndex}
76
+ slides={imageResults.map(([title, url, thumbnailUrl, sourceUrl]) => ({
77
+ src: thumbnailUrl,
78
+ alt: title,
79
+ description: (
80
+ <Stack align="center" gap="md">
81
+ {title && (
82
+ <Text component="cite" ta="center">
83
+ {title}
84
+ </Text>
85
+ )}
86
+ <Group align="center" justify="center" gap="xs">
87
+ <Button
88
+ variant="subtle"
89
+ component="a"
90
+ size="xs"
91
+ href={sourceUrl}
92
+ target="_blank"
93
+ title="Click to see the image in full size"
94
+ rel="noopener noreferrer"
95
+ onClick={() => {
96
+ addLogEntry("User visited an image result in full size");
97
+ }}
98
+ >
99
+ View in full resolution
100
+ </Button>
101
+ <Button
102
+ variant="subtle"
103
+ component="a"
104
+ href={url}
105
+ target="_blank"
106
+ size="xs"
107
+ title="Click to visit the page where the image was found"
108
+ rel="noopener noreferrer"
109
+ onClick={() => {
110
+ addLogEntry("User visited an image result source");
111
+ }}
112
+ >
113
+ Visit {getHostname(url)}
114
+ </Button>
115
+ </Group>
116
+ </Stack>
117
+ ),
118
+ }))}
119
+ />
120
+ </>
121
+ );
122
+ }