Best Practices
Essential performance best practices for building scalable, efficient applications with optimal user experience.
Performance Best Practices
Follow these essential best practices to build high-performance, scalable applications that provide excellent user experiences.
Architecture Best Practices
Design for Performance from the Start
1. Choose the Right Architecture Pattern
// Use appropriate patterns for different use cases
// For real-time features: Event-driven architecture
// Lightweight publish/subscribe bus for decoupling event producers from consumers.
export class EventBus {
  // Event name -> registered handler callbacks.
  private listeners = new Map<string, Array<(data: unknown) => void>>();

  /**
   * Synchronously invoke every handler registered for `event`.
   * Iterates over a snapshot of the handler list so a handler that
   * unsubscribes during emit cannot skip or double-run other handlers.
   */
  emit(event: string, data: unknown): void {
    const handlers = this.listeners.get(event) ?? [];
    for (const handler of [...handlers]) {
      handler(data);
    }
  }

  /**
   * Register `listener` for `event`.
   * @returns an unsubscribe function (backward-compatible addition;
   *          existing callers may ignore the return value).
   */
  on(event: string, listener: (data: unknown) => void): () => void {
    if (!this.listeners.has(event)) {
      this.listeners.set(event, []);
    }
    this.listeners.get(event)!.push(listener);
    return () => {
      const handlers = this.listeners.get(event) ?? [];
      const index = handlers.indexOf(listener);
      if (index !== -1) handlers.splice(index, 1);
    };
  }
}
// For heavy computations: Worker threads
import { Worker, isMainThread, parentPort } from 'worker_threads'

/**
 * Offload a CPU-heavy computation to a worker thread so the main event
 * loop stays responsive. Resolves with the worker's result message.
 * FIX: `export` must appear at module top level — the original placed it
 * inside `if (isMainThread)`, which is a syntax error in ES modules.
 */
export function processLargeDataset(data: unknown[]): Promise<unknown> {
  return new Promise((resolve, reject) => {
    const worker = new Worker(__filename)
    worker.postMessage(data)
    worker.on('message', resolve)
    worker.on('error', reject)
  })
}

if (!isMainThread) {
  // Worker thread: process the payload and post the result back to the parent.
  parentPort?.on('message', (data) => {
    // NOTE(review): `expensiveProcessing` is assumed to be defined in the
    // worker's module scope — confirm in real code.
    const result = expensiveProcessing(data)
    parentPort?.postMessage(result)
  })
}
2. Implement Proper Data Flow
// Unidirectional data flow for predictable performance
/**
 * Minimal unidirectional data store: state flows in via setState and out
 * to subscribers, keeping updates predictable and easy to trace.
 * Typed with Record<string, unknown> instead of `any` so the compiler
 * still checks state access at call sites.
 */
export class DataStore {
  private state: Record<string, unknown> = {};
  private subscribers: Array<(state: Record<string, unknown>) => void> = [];

  /** Shallow-merge `newState` into the current state, then notify subscribers. */
  setState(newState: Record<string, unknown>): void {
    this.state = { ...this.state, ...newState };
    this.notifySubscribers();
  }

  /** Current state object (not a defensive copy — callers must not mutate it). */
  getState(): Record<string, unknown> {
    return this.state;
  }

  /**
   * Register `callback` to run after every state change.
   * @returns an unsubscribe function.
   */
  subscribe(callback: (state: Record<string, unknown>) => void): () => void {
    this.subscribers.push(callback);
    return () => {
      this.subscribers = this.subscribers.filter(sub => sub !== callback);
    };
  }

  // Fan the new state out to every subscriber synchronously.
  private notifySubscribers(): void {
    this.subscribers.forEach(callback => callback(this.state));
  }
}
Frontend Best Practices
Resource Optimization
1. Optimize Bundle Size
// Use dynamic imports for code splitting so the heavy component is only
// downloaded when it is actually rendered.
// NOTE(review): `lazy` is presumably React.lazy — confirm the import in real code.
const LazyComponent = lazy(() => import('./ExpensiveComponent'))
// Tree shake unused code: lodash-es ships ES modules, so bundlers can drop
// everything except the named import.
import { debounce } from 'lodash-es' // Import only what you need
// Analyze bundle with webpack-bundle-analyzer to find oversized dependencies
// npm install --save-dev webpack-bundle-analyzer
2. Efficient Component Design
// Memoize expensive calculations
import { useMemo, useCallback } from 'react'
/**
 * Renders the products matching a case-insensitive name filter.
 * Filtering and the click handler are memoized so memoized children
 * (ProductCard) do not re-render needlessly.
 */
export function ProductList({ products, filter }: Props) {
  // Recompute the visible subset only when the inputs actually change.
  const visibleProducts = useMemo(() => {
    const query = filter.toLowerCase()
    return products.filter(product => product.name.toLowerCase().includes(query))
  }, [products, filter])

  // Stable handler identity across renders.
  const handleProductClick = useCallback((productId: string) => {
    // Handle click
  }, [])

  return (
    <div>
      {visibleProducts.map(product => (
        <ProductCard
          key={product.id}
          product={product}
          onClick={handleProductClick}
        />
      ))}
    </div>
  )
}
// Use React.memo for expensive components
// Memoized card: re-renders only when the `product` or `onClick` props change
// (shallow comparison by React.memo).
export const ProductCard = React.memo(function ProductCard({ product, onClick }: Props) {
  const handleClick = () => onClick(product.id)
  return (
    <div onClick={handleClick}>
      <h3>{product.name}</h3>
      <p>{product.description}</p>
    </div>
  )
})
3. Optimize Images and Assets
// Use Next.js Image component for automatic optimization
import Image from 'next/image'
/**
 * Product photo rendered through next/image for automatic resizing,
 * format negotiation, and lazy loading.
 */
export function ProductImage({ product }: Props) {
  // Featured products are above the fold, so fetch them eagerly.
  const isAboveTheFold = product.featured
  return (
    <Image
      src={product.imageUrl}
      alt={product.name}
      width={300}
      height={200}
      priority={isAboveTheFold} // Load important images first
      placeholder="blur"
      blurDataURL="data:image/jpeg;base64,..." // Low quality placeholder
    />
  )
}
// Preload critical resources
/**
 * Preload critical resources so the browser starts fetching them before
 * they are discovered during parsing/hydration.
 */
export function Layout() {
  return (
    <Head>
      {/* Font preloads always require crossOrigin, even for same-origin fonts. */}
      <link rel="preload" href="/fonts/inter.woff2" as="font" type="font/woff2" crossOrigin="" />
      {/* FIX: as="fetch" preloads need a crossOrigin mode matching the later
          fetch() call, otherwise the preloaded response is discarded. */}
      <link rel="preload" href="/api/dashboard/critical" as="fetch" crossOrigin="anonymous" />
    </Head>
  )
}
Backend Best Practices
API Design and Optimization
1. Efficient Data Fetching
// Use GraphQL or efficient REST patterns to avoid over-fetching
/**
 * GET /products — returns active products with only the requested columns
 * to avoid over-fetching.
 * SECURITY FIX: `fields` comes from the query string; interpolating it
 * directly into the SELECT clause was an SQL injection vector, so each
 * requested field is now validated against an allowlist of real columns.
 */
export async function getProductsAPI(request: NextRequest) {
  const ALLOWED_FIELDS = new Set(['id', 'name', 'price', 'description', 'category', 'status'])
  const url = new URL(request.url)
  const requested = (url.searchParams.get('fields')?.split(',') ?? ['id', 'name', 'price'])
    .map(field => field.trim())
  // Keep only known column names; fall back to the defaults if nothing survives.
  const fields = requested.filter(field => ALLOWED_FIELDS.has(field))
  const safeFields = fields.length > 0 ? fields : ['id', 'name', 'price']
  const selectClause = safeFields.map(field => `p.${field}`).join(', ')
  const products = await db.query(
    `SELECT ${selectClause} FROM products p WHERE p.active = true`,
    []
  )
  return NextResponse.json(products.rows)
}
// Implement pagination for large datasets
export async function getProductsPaginated(page: number = 1, limit: number = 20) {
const offset = (page - 1) * limit
const [products, totalCount] = await Promise.all([
db.query('SELECT * FROM products LIMIT $1 OFFSET $2', [limit, offset]),
db.query('SELECT COUNT(*) FROM products')
])
return {
products: products.rows,
pagination: {
page,
limit,
total: parseInt(totalCount.rows[0].count),
pages: Math.ceil(parseInt(totalCount.rows[0].count) / limit)
}
}
}
2. Batch Operations
// Batch database operations to reduce round trips
/**
 * Update many products in one round trip using an UPDATE ... FROM (VALUES ...)
 * batch, wrapped in a transaction so the batch is all-or-nothing.
 * BUG FIX: the placeholder list previously generated literal numbers
 * (`(1, 2, 3)`) instead of parameter markers (`($1, $2, $3)`), so the
 * supplied params were never bound.
 */
export async function updateMultipleProducts(updates: Array<{id: string, data: any}>) {
  // Nothing to do; also avoids generating an invalid empty VALUES list.
  if (updates.length === 0) return
  const client = await pool.connect()
  try {
    await client.query('BEGIN')
    // One parameterized (id, name, price) tuple per update.
    const values = updates.map((update, index) =>
      `($${index * 3 + 1}, $${index * 3 + 2}, $${index * 3 + 3})`
    ).join(', ')
    const params = updates.flatMap(update => [update.id, update.data.name, update.data.price])
    await client.query(`
      UPDATE products
      SET name = batch.name, price = batch.price
      FROM (VALUES ${values}) AS batch(id, name, price)
      WHERE products.id = batch.id::uuid
    `, params)
    await client.query('COMMIT')
  } catch (error) {
    // Roll back the whole batch on any failure, then surface the error.
    await client.query('ROLLBACK')
    throw error
  } finally {
    client.release()
  }
}
3. Response Optimization
// Compress responses
import { NextRequest, NextResponse } from 'next/server'
import { gzip } from 'zlib'
import { promisify } from 'util'
const gzipAsync = promisify(gzip)
// Gzip-compresses JSON/text responses when the client advertises support.
// NOTE(review): NextResponse.next() in Next.js middleware is a passthrough
// and its body is generally not readable at this point — confirm this
// pattern actually observes the downstream body before relying on it.
export async function middleware(request: NextRequest) {
  const response = NextResponse.next()
  // Only compress if client accepts gzip
  if (request.headers.get('accept-encoding')?.includes('gzip')) {
    // Compress only compressible content types; images etc. are already compressed.
    const contentType = response.headers.get('content-type')
    if (contentType?.includes('application/json') || contentType?.includes('text/')) {
      const body = await response.text()
      const compressed = await gzipAsync(body)
      return new NextResponse(compressed, {
        status: response.status,
        headers: {
          // Preserve the original headers, then override encoding/length
          // to describe the compressed payload.
          ...Object.fromEntries(response.headers),
          'content-encoding': 'gzip',
          'content-length': compressed.length.toString(),
        }
      })
    }
  }
  return response
}
Database Best Practices
Query Optimization
1. Index Strategy
-- Create appropriate indexes for query patterns
-- Single column index for simple equality/range filters on category
CREATE INDEX idx_products_category ON products(category);
-- Composite index for multi-column queries; column order matters: this also
-- serves filters on (category) and (category, status) prefixes
CREATE INDEX idx_products_category_status_created ON products(category, status, created_at);
-- Partial index: smaller and cheaper to maintain when queries always
-- filter on status = 'active'
CREATE INDEX idx_products_active ON products(category) WHERE status = 'active';
-- GIN full-text index over name + description for text search queries
CREATE INDEX idx_products_search ON products USING GIN(to_tsvector('english', name || ' ' || description));
2. Query Patterns
// Efficient pagination with cursor-based approach
/**
 * Cursor-based pagination keyed on created_at: cost stays constant no
 * matter how deep the caller paginates, unlike OFFSET which must scan
 * and discard all skipped rows.
 */
export async function getProductsCursor(cursor?: string, limit: number = 20) {
  const params: any[] = [limit]
  let sql = 'SELECT * FROM products'
  if (cursor) {
    sql += ' WHERE created_at < $2'
    params.push(cursor)
  }
  sql += ' ORDER BY created_at DESC LIMIT $1'

  const { rows } = await db.query(sql, params)
  // A full page suggests more rows exist; the last timestamp becomes the
  // cursor for the next request.
  const hasMore = rows.length === limit
  return {
    products: rows,
    nextCursor: hasMore ? rows[rows.length - 1].created_at : null
  }
}
// Use prepared statements for repeated queries: pg parses and plans the
// named statement once per connection, then reuses the plan on every call.
const getProductStatement = {
  name: 'get-product',
  text: 'SELECT * FROM products WHERE id = $1',
}

// Fetch a single product row by primary key via the prepared statement.
export async function getProduct(id: string) {
  return db.query(getProductStatement, [id])
}
3. Connection Management
// Proper connection pool configuration
import { Pool } from 'pg'
// Shared connection pool; size and timeouts should be tuned against the
// database server's max_connections and typical query latency.
export const pool = new Pool({
  connectionString: process.env.DATABASE_URL,
  max: 20, // Maximum number of clients held by the pool
  idleTimeoutMillis: 30000, // Close idle clients after 30 seconds
  connectionTimeoutMillis: 2000, // Return error after 2 seconds if connection could not be established
  maxUses: 7500, // Close a connection after it has been used 7500 times
})
// Monitor pool health
// NOTE(review): exiting the process on an idle-client error follows the
// node-postgres example, but may be too aggressive for production —
// confirm the desired failure policy.
pool.on('error', (err) => {
  console.error('Unexpected error on idle client', err)
  process.exit(-1)
})
pool.on('connect', () => {
  console.log('New client connected to database')
})
pool.on('remove', () => {
  console.log('Client removed from pool')
})
Caching Best Practices
Multi-Level Caching Strategy
// Implement cache hierarchy
/**
 * Read-through cache hierarchy: memory (L1) -> distributed (L2) -> source (L3).
 * Reads warm the faster tiers on the way back; invalidation clears every tier.
 */
export class CacheHierarchy {
  constructor(
    private l1Cache: MemoryCache, // In-memory (fastest)
    private l2Cache: RedisCache, // Distributed cache
    private l3Source: DataSource // Original data source
  ) {}

  /** Resolve `key` from the fastest tier that has it, populating tiers above. */
  async get(key: string) {
    // Fastest path: in-process memory.
    const fromMemory = this.l1Cache.get(key)
    if (fromMemory !== null) {
      return fromMemory
    }

    // Next: shared distributed cache; warm L1 on a hit.
    const fromRedis = await this.l2Cache.get(key)
    if (fromRedis !== null) {
      this.l1Cache.set(key, fromRedis, 300) // 5 minutes
      return fromRedis
    }

    // Last resort: the original data source; warm both cache tiers.
    const fromSource = await this.l3Source.get(key)
    if (fromSource !== null) {
      this.l1Cache.set(key, fromSource, 300)
      await this.l2Cache.set(key, fromSource, 1800) // 30 minutes
    }
    return fromSource
  }

  /** Drop `key` from every cache tier. */
  async invalidate(key: string) {
    this.l1Cache.delete(key)
    await this.l2Cache.delete(key)
  }
}
Monitoring and Maintenance Best Practices
Performance Budgets
// Set and enforce performance budgets
// Set and enforce performance budgets (each value is the maximum allowed).
export const PERFORMANCE_BUDGETS = {
  // Frontend budgets
  bundleSize: 250 * 1024, // 250KB
  firstContentfulPaint: 1500, // 1.5 seconds
  largestContentfulPaint: 2500, // 2.5 seconds
  cumulativeLayoutShift: 0.1,
  // Backend budgets
  apiResponseTime: 200, // 200ms
  databaseQueryTime: 100, // 100ms
  memoryUsage: 512 * 1024 * 1024, // 512MB
}

/**
 * Compare measured metrics against every budget (the original only
 * checked 2 of the 7) and return one violation message per exceeded
 * budget. Metrics that are absent or not numeric are skipped.
 * FIX: `const violations = []` was an implicit any[] under strict mode.
 */
export function checkPerformanceBudgets(metrics: Record<string, number>): string[] {
  const violations: string[] = []
  for (const [name, budget] of Object.entries(PERFORMANCE_BUDGETS)) {
    const measured = metrics[name]
    if (typeof measured === 'number' && measured > budget) {
      violations.push(`${name} exceeded: ${measured} > ${budget}`)
    }
  }
  return violations
}
Continuous Performance Testing
# .github/workflows/performance.yml
# Runs performance checks on every PR to main and nightly at 02:00 UTC.
name: Performance Tests
on:
  pull_request:
    branches: [main]
  schedule:
    - cron: '0 2 * * *' # Daily at 2 AM
jobs:
  performance:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Setup Node.js
        uses: actions/setup-node@v3
        with:
          node-version: '18'
      - name: Install dependencies
        run: npm ci
      - name: Build application
        run: npm run build
      # NOTE(review): lhci collect targets localhost:3000 — presumably a
      # server must be started first (or configured via lighthouserc);
      # confirm before relying on this step.
      - name: Run Lighthouse CI
        run: |
          npm install -g @lhci/cli
          lhci collect --url=http://localhost:3000
          lhci assert
      - name: Run load tests
        run: |
          npm install -g artillery
          artillery run load-test-config.yml
Security and Performance Balance
Secure Optimization Practices
// Rate limiting that doesn't impact legitimate users
/**
 * Rate limiter whose per-client allowance adapts to behavior: violations
 * shrink a client's reputation (and thus its effective limit), while
 * sustained low usage slowly restores it.
 */
export class AdaptiveRateLimit {
  // clientId -> requests seen in the current window, window expiry, reputation multiplier.
  private attempts = new Map<string, { count: number; resetTime: number; reputation: number }>()

  /**
   * Record one request for `clientId` and report whether it is allowed.
   * @param baseLimit requests permitted per 1-minute window at reputation 1.0
   */
  checkLimit(clientId: string, baseLimit: number = 100): boolean {
    const now = Date.now()
    const existing = this.attempts.get(clientId)

    // Unknown client or expired window: open a fresh window, carrying the
    // client's reputation forward so good/bad history persists.
    if (!existing || now > existing.resetTime) {
      this.attempts.set(clientId, {
        count: 1,
        resetTime: now + 60000, // 1 minute window
        reputation: existing?.reputation || 1.0
      })
      return true
    }

    // Effective limit scales with reputation.
    const allowance = Math.floor(baseLimit * existing.reputation)
    if (existing.count >= allowance) {
      // Over the limit: penalize reputation (floored at 0.1) and reject.
      existing.reputation = Math.max(0.1, existing.reputation * 0.9)
      return false
    }

    existing.count++
    // Well under the limit: nudge reputation upward (capped at 2.0).
    if (existing.count < allowance * 0.5) {
      existing.reputation = Math.min(2.0, existing.reputation * 1.01)
    }
    return true
  }
}
Following these best practices ensures your application performs well under various conditions while maintaining security, scalability, and maintainability.