    <div className="home-page-container">
      {/* The main content of the app: logo + recorder */}
      <Logo />
      <AudioRecorder />
      {/*
        TranscriptDisplay is already included inside AudioRecorder as soon as
        a transcript is generated, so we don't need to call it separately here.
      */}
    </div>
  );
}
export default HomePage;
HomePage.css ##
.home-page-container {
display: flex;
flex-direction: column;
align-items: center;
width: 100%;
max-width: 600px; /* For responsive design, can adjust or remove */
margin: 0 auto;
padding: 1rem;
background-color: rgba(255, 255, 255, 0.9); /* Slight white overlay */
border-radius: 8px;
box-shadow: 0 2px 6px rgba(0, 0, 0, 0.15);
}
--------------------------------------------------------------------------------
Footer.js ##
import React from 'react';
import './Footer.css';
function Footer() {
  return (
    <footer className="footer-container">
      {/* Basic copyright line */}
      <p>&copy; {new Date().getFullYear()} Voice 2 Clip</p>
    </footer>
  );
}
export default Footer;
Footer.css ##
.footer-container {
background-color: #231f20;
color: #fff;
text-align: center;
padding: 1rem;
width: 100%;
margin-top: auto; /* Ensures the footer stays at the bottom if content is short */
}
.footer-container p {
margin: 0;
font-size: 0.9rem;
}
SESSION 5 - PAGES PT1
Page1.js ##
import React, { useState, useEffect } from 'react';
import { Link } from 'react-router-dom';
import './Page1.css';
function Page1() {
// Simple counter to demonstrate state usage
const [count, setCount] = useState(0);
useEffect(() => {
console.log('Page1 mounted');
// Log to console when component mounts
}, []);
const handleIncrement = () => {
// Increment count for demonstration
setCount((prev) => prev + 1);
};
  return (
    <div className="page1-container">
      <h2>Page1 - Welcome</h2>
      <p>This is the first page in Session 5.</p>
      <p>You clicked {count} times.</p>
      <button onClick={handleIncrement}>Increment</button>
      <p className="nav-link">
        <Link to="/page2">Go to Page2</Link>
      </p>
    </div>
  );
}
export default Page1;
Page1.css ##
.page1-container {
background-color: #fff;
width: 90%;
max-width: 500px;
margin: 1.5rem auto;
padding: 1.25rem;
border-radius: 8px;
box-shadow: 0 2px 6px rgba(0, 0, 0, 0.15);
text-align: center;
}
.page1-container h2 {
margin-bottom: 1rem;
}
.page1-container button {
background-color: #4caf50; /* Green */
color: #fff;
padding: 0.75rem 1.25rem;
border: none;
border-radius: 4px;
font-size: 1rem;
margin-top: 1rem;
}
.page1-container button:hover {
opacity: 0.9;
}
.nav-link {
margin-top: 1rem;
}
--------------------------------------------------------------------------------
Page2.js ##
import React, { useEffect, useState } from 'react';
import { Link } from 'react-router-dom';
import './Page2.css';
function Page2() {
// data will store the result from a sample fetch
const [data, setData] = useState(null);
const [error, setError] = useState('');
useEffect(() => {
console.log('Page2 mounted');
// Sample fetch from a public endpoint for demonstration
const fetchData = async () => {
try {
console.log('Fetching sample data...');
const response = await fetch('https://jsonplaceholder.typicode.com/todos/1');
if (!response.ok) {
throw new Error(`Network response was not ok - status: ${response.status}`);
}
const result = await response.json();
console.log('Fetch response:', result);
setData(result);
} catch (err) {
console.error('Error fetching data:', err);
setError(err.message);
}
};
fetchData();
}, []);
  return (
    <div className="page2-container">
      <h2>Page2 - Sample Fetch</h2>
      {/* Show the error if the fetch failed, otherwise the fetched todo */}
      {error && <p>Error: {error}</p>}
      {data ? <p>Fetched todo: {data.title}</p> : !error && <p>Loading...</p>}
      <p className="nav-link">
        <Link to="/page3">Go to Page3</Link>
      </p>
    </div>
  );
}
export default Page2;
--------------------------------------------------------------------------------
App.js ##
import React from 'react';
import { BrowserRouter as Router, Routes, Route } from 'react-router-dom';
import Navbar from './components/Navbar';
import Footer from './components/Footer';
import HomePage from './components/HomePage';
import Page1 from './pages/Page1';
import Page2 from './pages/Page2';
import Page3 from './pages/Page3';
function App() {
  return (
    <Router>
      <div className="App">
        {/* Global Navbar at the top */}
        <Navbar />
        {/* Main application routes */}
        <Routes>
          <Route path="/" element={<HomePage />} />
          <Route path="/page1" element={<Page1 />} />
          <Route path="/page2" element={<Page2 />} />
          <Route path="/page3" element={<Page3 />} />
          {/* Consider adding a 404 route if needed */}
          {/* <Route path="*" element={<NotFound />} /> */}
        </Routes>
        {/* Global Footer at the bottom */}
        <Footer />
      </div>
    </Router>
  );
}
export default App;
App.css ##
@import url('https://fonts.googleapis.com/css2?family=Montserrat:wght@400;700&display=swap');
/* Global reset and font settings */
body {
margin: 0;
padding: 0;
font-family: 'Montserrat', sans-serif;
background: linear-gradient(to bottom, #f3e5f5, #fce4ec);
min-height: 100vh;
display: flex;
flex-direction: column;
}
.App {
/* Allow the content to stack with navbar at the top, footer at bottom */
flex: 1;
display: flex;
flex-direction: column;
}
h1, p {
text-align: center;
color: #444;
margin: 1rem;
}
button {
cursor: pointer;
font-family: inherit;
transition: background-color 0.3s ease, opacity 0.3s ease;
}
button:hover {
opacity: 0.9;
}
a {
color: #0066cc;
text-decoration: none;
}
a:hover {
text-decoration: underline;
}
SESSION 8 - README
README.md ##
# Voice 2 Clip
## Project Overview
Voice 2 Clip is a simple Progressive Web App (PWA) that records your voice, transcribes it using the OpenAI Whisper API, and lets you copy the resulting transcription to your clipboard. The design is mobile-first, with a light gradient background (light purple to pink), a large record/stop button that pulses red while recording, and a simple text field with a copy-to-clipboard button.
The original goals:
• Provide a minimal one-page experience, optimized for phone usage.
• Allow the user to record audio, send it to Whisper for transcription, and copy the transcribed text.
• Implement basic PWA functionality (manifest.json, etc.) so the app can be installed on mobile devices.
## Features & Pages
1. **Home ("/")**
• Combines the core functionality: logo, record button, and transcription display (copy-to-clipboard).
• Utilizes the built-in components for audio recording and text display.
2. **Page1 ("/page1")**
• Demonstrates simple state management (a counter).
• Offers a link to navigate to Page2.
3. **Page2 ("/page2")**
• Fetches and displays sample data from a public API endpoint (jsonplaceholder.typicode.com).
• Demonstrates simple error handling and data display.
• Offers a link to navigate to Page3.
4. **Page3 ("/page3")**
• Simple final page with navigation back to home or to Page2.
• Demonstrates programmatic navigation using React Router (see the sketch after this list).
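For reference, here is a minimal sketch of programmatic navigation with React Router v6's `useNavigate` hook; the component name is illustrative and not taken from the actual source:
```
import { useNavigate } from 'react-router-dom';

// Illustrative component: navigates back to the home route on click
function BackHomeButton() {
  const navigate = useNavigate();
  return <button onClick={() => navigate('/')}>Back to Home</button>;
}

export default BackHomeButton;
```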
### Main Components:
1. **Navbar**
• Displays the app’s logo and basic navigation links.
2. **Footer**
• Basic footer with copyright information.
3. **Logo**
• Displays the logo.png in a styled container.
4. **AudioRecorder**
• Handles microphone permission requests and recording of audio via the MediaRecorder API.
• On stop, it calls the Whisper API to transcribe the recorded audio (see the recording sketch after this list).
5. **TranscriptDisplay**
• Shows the transcription text in a textarea.
• Includes a "Copy to Clipboard" button.
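The recording flow these two components implement can be sketched roughly as follows, assuming browser support for `getUserMedia` and `MediaRecorder`; the function and callback names here are illustrative:
```
// Illustrative sketch of the record -> transcribe -> display flow
async function startRecording(onTranscript) {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  const recorder = new MediaRecorder(stream);
  const chunks = [];

  recorder.ondataavailable = (event) => chunks.push(event.data);
  recorder.onstop = async () => {
    // Bundle the recorded chunks and send them for transcription
    const audioBlob = new Blob(chunks, { type: 'audio/webm' });
    const text = await transcribeAudio(audioBlob); // from services/api.js
    onTranscript(text);
    stream.getTracks().forEach((track) => track.stop()); // release the mic
  };

  recorder.start();
  return recorder; // the caller invokes recorder.stop() to finish
}
```
TranscriptDisplay's copy button can then hand the resulting text to `navigator.clipboard.writeText(text)`.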
### Services:
- **api.js**
• Contains the `transcribeAudio` function, which sends audio data to the OpenAI Whisper API for transcription (a sketch follows below).
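A hedged sketch of what `transcribeAudio` can look like; the actual implementation may differ, but the endpoint, form fields, and `Bearer` header match OpenAI's documented Whisper API:
```
// services/api.js - illustrative sketch, not the actual project file
export async function transcribeAudio(audioBlob) {
  const formData = new FormData();
  formData.append('file', audioBlob, 'recording.webm');
  formData.append('model', 'whisper-1');

  const response = await fetch('https://api.openai.com/v1/audio/transcriptions', {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${process.env.REACT_APP_OPENAI_API_KEY}`,
    },
    body: formData,
  });

  if (!response.ok) {
    throw new Error(`Whisper request failed - status: ${response.status}`);
  }

  const result = await response.json();
  return result.text; // Whisper responds with { "text": "..." }
}
```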
## Prerequisites & Installation
1. **Clone or Download**
• Clone this repository or download the source code.
2. **Install Dependencies**
• Navigate into the project folder and run:
```
npm install
```
3. **Environment Variables**
• Create a file named `.env` in the root of your project.
• Add your OpenAI API key:
```
REACT_APP_OPENAI_API_KEY=your_openai_api_key_here
```
• Make sure to restart the development server if you change this file.
## Running the App
1. **Development Server**
• Start the development server with:
```
npm start
```
• Open your browser and visit:
```
http://localhost:3000/
```
2. **Live Reloading**
• Any changes you make to the source files will automatically reload the page in your browser.
## Deployment Notes
• **Production Build**:
```
npm run build
```
This creates an optimized `build` folder, ready for production.
• **PWA Configuration**:
- To leverage PWA features, ensure the `manifest.json` file is correctly set up with your chosen app name, icons, and background color (an illustrative example follows this list).
- Create React App includes service worker support for production builds. You can customize its registration in `serviceWorkerRegistration.js`.
• **Hosting**:
- You can host the optimized build folder on Netlify, Vercel, GitHub Pages, or any static hosting provider.
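For orientation, an illustrative `manifest.json`; every value below is an assumption to adapt to your own app name, icon files, and colors:
```
{
  "short_name": "Voice 2 Clip",
  "name": "Voice 2 Clip",
  "icons": [
    {
      "src": "logo.png",
      "sizes": "192x192",
      "type": "image/png"
    }
  ],
  "start_url": ".",
  "display": "standalone",
  "theme_color": "#f3e5f5",
  "background_color": "#ffffff"
}
```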
## Environment Variables
• **REACT_APP_OPENAI_API_KEY**
- Used by `api.js` to authenticate requests to the Whisper API.
- Loaded automatically by Create React App from the `.env` file.
## Firebase Config
This app does not require Firebase. If you wish to integrate Firebase in the future (e.g., for saving transcripts or user data), you can add your Firebase configuration, but it is not needed for the current functionality.
## APIs
1. **OpenAI Whisper API**
- The app uses an HTTP `POST` request to the Whisper endpoint (https://api.openai.com/v1/audio/transcriptions).
- Make sure you have an active OpenAI account and an API key to use this service.
2. **Sample Public API**
- Pages like Page2 fetch data from https://jsonplaceholder.typicode.com for demonstration.
Enjoy using Voice 2 Clip! If you have any issues or improvement ideas, please feel free to open an issue or contribute.