Skip to content

Commit

Permalink
__
Browse files Browse the repository at this point in the history
  • Loading branch information
adnan wahab committed Oct 23, 2024
1 parent a7e67b2 commit 28f3e88
Show file tree
Hide file tree
Showing 7 changed files with 201 additions and 66 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
200k_prs/whisper.cpp/
archive/
hardware/*
web-ui/archive
Expand Down
4 changes: 3 additions & 1 deletion readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -46,4 +46,6 @@ incorpate -- ideas from 8 billion people
https://youtu.be/y4ajXJ3nj1Q?si=CMpEXr9DBH86-9jG

# LLama-tools.com is an agency dedicated to helping <a href="https://worrydream.com">bret victor</a>.
I met bret in 2014, at d3unconf thanks to enjalot, when i saw https://worrydream.com/SeeingSpaces/ - so i dedicated my life to turning bret's dreams into reality.
I met bret in 2014, at d3unconf thanks to enjalot, when i saw https://worrydream.com/SeeingSpaces/ - so i dedicated my life to catalyzing bret's dreams into reality.

Thank you to RMS - 1e8 new entrepreneurs * 1e6 = millionaire = 1e14 (jensen the hero - 1e14 robotics marketing according to huang)
88 changes: 88 additions & 0 deletions scripts/whisper.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
#!/usr/bin/env bash
# Set up a micromamba environment and transcribe audio with OpenAI Whisper.
#
# Steps:
#   1. Create a Python 3.9 environment (Whisper requires Python >= 3.8).
#   2. Install PyTorch -- pick exactly ONE of the install lines below.
#   3. Install Whisper from PyPI. The package name is "openai-whisper";
#      the bare "whisper" package on PyPI is an unrelated project.
#   4. Run the `whisper` CLI on your MP3 files.

set -euo pipefail

# --- Step 1: create and activate the environment -------------------------
# `micromamba activate` only works after the shell hook is loaded; inside a
# non-interactive script we must eval the hook first.
eval "$(micromamba shell hook --shell bash)"
micromamba create -y -n whisper_env python=3.9
micromamba activate whisper_env

# --- Step 2: install PyTorch ---------------------------------------------
# Default (CPU, or whatever the pytorch channel resolves for this machine):
micromamba install -y pytorch torchvision torchaudio -c pytorch

# If you need a specific CUDA version, use this INSTEAD of the line above
# (running both installs is redundant and can leave a conflicting mix):
# micromamba install -y pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch

# --- Step 3: install Whisper ----------------------------------------------
# NOTE: whisper also needs the ffmpeg binary on PATH at runtime.
pip install openai-whisper

# --- Step 4: transcribe ---------------------------------------------------
# The package installs a `whisper` console script; model sizes are
# tiny / base / small / medium / large (accuracy vs. speed trade-off).
whisper "$HOME"/*.mp3 --model small

# Each MP3 gets a transcription text file written next to it.
39 changes: 12 additions & 27 deletions web-ui/my-app/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,35 +11,28 @@ import { serveStatic } from 'hono/serve-static';
import { jwt } from 'hono/jwt';
import { cors } from 'hono/cors';
import { Context, Hono } from 'hono'
import { webhook_receiver } from './utils.js'
import * as utils from './utils.js'
import send_email from './marketing.js'
const Blag = utils.Blag

// function serveBlag(req: Request) {
// console.log('filePath', filePath)
// let indexHtmlContent = fs.readFileSync(filePath, "utf-8");

// const blag = indexHtmlContent.replace(
// "{{template blag}}",
// //`${renderToString(<Blag />)}`,
// );

// return new Response(blag, {
// headers: {
// "Content-Type": "text/html",
// },
// });
// }
import { renderToString } from 'react-dom/server'
//app.use(logger())
app.use(logger())
// Serve the odyssey page, rendered inside the shared site layout.
app.all('/odyssey', (c) => {
  return c.html(utils.Layout(odyssey()))
})

const path = require('path')



// Client mic button was pressed: start server-side audio recording
// (delegates to utils.start_audio_egress, which launches a LiveKit egress).
app.post('/microphone-clicked', async (c) => {
  console.log('microphone-clicked')
  const egressResult = await utils.start_audio_egress();
  return c.json({ success: true, result: egressResult })
});


app.all('/', function (c) {
const filePath = path.resolve("src/blag.html");
const html = fs.readFileSync(filePath, "utf8");
Expand Down Expand Up @@ -69,7 +62,7 @@ app.all('/iframe/*', (c) => {
}
return c.html(html)
})
app.post('/webhook-endpoint', webhook_receiver);
app.post('/webhook', utils.webhook_receiver);

app.all('/_/TeleGuidance.tsx', async (c) => {
console.log('htmx render ', c.req.path)
Expand Down Expand Up @@ -102,16 +95,8 @@ app.all('/views/*', async (c) => {
})
app.post('/livekit_connect', utils.livekit_connect);

// app.get('/api/replay_analyzer', (c) => c.json({'Pretty Blog API': 1}));
console.log('app', 'hono', Date.now())

//
//export default app
//3d css react tw
//app.get('/blag', (c) => c.json({'Pretty Blog API': 1}));
//app.get('/blag-archive', (c) => c.json({'Pretty Blog API': 1}));
//app.get('/api/magic_llama', (c) => c.json({'Pretty Blog API': 1}));
//app.get('/api/measure_magic_llama', (c) => c.json({'Pretty Blog API': 1}))

app.all('/iframe_observablbehq', async (c) => {
const response = await fetch('http://127.0.0.1:3001' + c.req.path, {
Expand Down
19 changes: 19 additions & 0 deletions web-ui/my-app/src/llama-tools/livekit_audio.html
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,25 @@ <h1>Audio Capture with LiveKit</h1>

// Publish the track
const audioPublication = await room.localParticipant.publishTrack(audioTrack);



// Give the track publish ~2 s to settle, then notify the backend so it
// can start recording (the server starts an audio egress for the room).
setTimeout(() => {
  fetch('/microphone-clicked', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json'
    }
  })
  // NOTE(review): response.ok is never checked -- a non-2xx reply with a
  // non-JSON body would surface here as a parse error; confirm the server
  // always answers JSON.
  .then(response => response.json())
  .then(data => {
    console.log('Success:', data); // Will contain {success: true, result: ...}
  })
  .catch(error => {
    console.error('Error:', error);
  });
  console.log('audio recording started')
}, 2000);
});

function handleTrackSubscribed(track, publication, participant) {
Expand Down
23 changes: 22 additions & 1 deletion web-ui/my-app/src/odysssey/sisterschools.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,29 @@ export default function Example() {
height={48}
className="col-span-2 col-start-2 max-h-12 w-full object-contain sm:col-start-auto lg:col-span-1"
/>
</div>


<img
alt="SICP"
src="https://avatars.githubusercontent.com/u/22305925?s=200&v=4"
width={158}
height={48}
className="col-span-2 col-start-2 max-h-12 w-full object-contain sm:col-start-auto lg:col-span-1"
/>

<a href="https://threejs-journey.com/">
<svg stroke="white" version="1.1" id="Calque_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 58.5 66.6" style="enable-background:new 0 0 58.5 66.6;" xml:space="preserve">
<path class="face-1" d="M23.1,55.7l16.1-9.3c0,0,0,0,0,0c1.1-0.7,1.8-1.9,1.8-3.1l0.1-19.1L23.1,34.4V55.7z"></path>
<path class="face-2" d="M21.3,10L2.9,20.5l18,10.2l18.4-10.5c0,0,0,0-0.1,0l-17.4-10C21.7,10.1,21.5,10.1,21.3,10z"></path>
<path class="face-3" d="M1.8,46.7L18,56.6c0,0,0,0,0,0c0.3,0.2,0.5,0.3,0.8,0.3V34.5L0,23.8v19.7C0,44.9,0.7,46.1,1.8,46.7z"></path>
<path class="triangle-1" d="M56.8,30.4l-11.4-6.6l-0.1,19.2l11.5-6.7c1-0.6,1.7-1.7,1.7-2.9C58.5,32.1,57.9,31,56.8,30.4z"></path>
<path class="triangle-2" d="M0,50.7v12.6c0,1.2,0.6,2.3,1.7,2.9c0.5,0.3,1.1,0.5,1.7,0.5c0.6,0,1.2-0.2,1.7-0.5l10.4-6L0,50.7z"></path>
<path class="triangle-3" d="M16.4,7L5.1,0.5c-1-0.6-2.3-0.6-3.4,0C0.6,1.1,0,2.2,0,3.4v13.2L16.4,7z"></path>
</svg>


</a>
</div>
</div>
</div>
)
Expand Down
93 changes: 56 additions & 37 deletions web-ui/my-app/src/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,35 +11,57 @@ import { serveStatic } from 'hono/serve-static';
import { jwt } from 'hono/jwt';
import { cors } from 'hono/cors';
import { Context, Hono } from 'hono'
import { webhook_receiver } from './utils.js'
import { WebhookReceiver } from 'livekit-server-sdk';

import { LiveKitClient } from 'livekit-server-sdk';
import * as utils from './utils.js'


import { $ } from "bun"

const json_keys = await $`op item list --tags api --format json`.json()
const shit = `#!/bin/bash
const apiKey = process.env.LIVEKIT_API_KEY
const apiSecret = process.env.LIVEKIT_API_SECRET
const wsUrl = process.env.LIVEKIT_WS_URL
// Start an audio-only LiveKit egress for the hard-coded room "example-room",
// recording to a timestamped MP3 under recordings/. Returns the output path
// immediately; the recording continues server-side until the egress is stopped.
// NOTE(review): assumes the `lk` CLI is on PATH and recordings/ exists -- confirm.
const start_audio_egress = async function () {
  const timestamp = Date.now();
  const outputPath = `recordings/audio-${timestamp}.mp3`;

  // Bun shell template: each ${...} interpolation is passed as a single,
  // escaped argument, so the env-derived credentials are shell-injection safe.
  const result = await $`lk egress start \
    --url ${wsUrl} \
    --api-key ${apiKey} \
    --api-secret ${apiSecret} \
    --room example-room \
    --audio-only \
    --output ${outputPath}`;

  console.log('Audio egress started:', result);
  return outputPath;
}
export { start_audio_egress }
// Placeholder for a screen-share egress; not implemented yet.
// NOTE(review): name misspells "screenshare" -- fix before callers depend on it.
const start_egress_screeshare = async function () {

}

# Loop through each item ID and retrieve the notes
for id in $item_ids; do
# Get the item details in JSON
item_details=$(op item get "$id" --format json)
# Extract the title and notes
title=$(echo "$item_details" | jq -r '.overview.title')
notes=$(echo "$item_details" | jq -r '.notesPlain')
echo "------------------------------"
echo "Title: $title"
echo "Notes:"
echo "$notes"
echo "------------------------------"
done`
// const json_keys = await $`op item list --tags api --format json`.json()
// const shit = `#!/bin/bash

// # Fetch all item IDs tagged with 'api'
// item_ids=$(op item list --tags api --format json | jq -r '.[].id')

// # Loop through each item ID and retrieve the notes
// for id in $item_ids; do
// # Get the item details in JSON
// item_details=$(op item get "$id" --format json)

// # Extract the title and notes
// title=$(echo "$item_details" | jq -r '.overview.title')
// notes=$(echo "$item_details" | jq -r '.notesPlain')

// echo "------------------------------"
// echo "Title: $title"
// echo "Notes:"
// echo "$notes"
// echo "------------------------------"
// done`

const fetchApiItems = async () => {
const itemIds = await $`op item list --tags api --format json`.json();
Expand All @@ -62,22 +84,19 @@ const fetchApiItems = async () => {
// console.log('api_keys', api_keys)


const apiKey = process.env.LIVEKIT_API_KEY
const apiSecret = process.env.LIVEKIT_API_SECRET
const wsUrl = process.env.LIVEKIT_WS_URL


const receiver = new WebhookReceiver('apikey', 'apisecret');

// Hono handler for LiveKit webhook POSTs.
//
// LiveKit signs each webhook with a JWT carried in the Authorization header;
// WebhookReceiver.receive() verifies that signature against the raw body and
// decodes the event. Responds 200 {success: true} so LiveKit stops retrying.
export const webhook_receiver = async (c: Context) => {
  // HonoRequest exposes headers via c.req.header() -- the original used the
  // nonexistent c.req.get(), which threw at runtime.
  const authHeader = c.req.header('Authorization')

  // receive() needs the raw body *string* (the signature covers the exact
  // bytes). The original called c.req.raw() -- but `raw` is a property (the
  // underlying Request), not a function -- so this handler always crashed.
  const body = await c.req.text()

  const event = await receiver.receive(body, authHeader)

  // Parse the payload from the text we already read instead of consuming the
  // request body a second time.
  const payload = JSON.parse(body)
  console.log('Webhook received:', payload)
  console.log('event', event, payload, authHeader)

  return c.json({ success: true })
}

Expand Down Expand Up @@ -231,7 +250,7 @@ export const indexPage = `<div>

// Connecting to LiveKit
const json = await connect_to_livekit(jsonData);
console.log('Generated token and wsUrl:', json);
//console.log('Generated token and wsUrl:', json);

return c.json(json);
}
Expand Down

0 comments on commit 28f3e88

Please sign in to comment.