Bubble Sort (very basic sorting technique)
Simple adjacent swaps; educational and easy to trace. C++ Implementation:
// Time: O(n^2) average/worst, Space: O(1)
#include <bits/stdc++.h>
using namespace std;
// Sort `a` ascending by repeatedly bubbling the largest remaining
// element to the end of the unsorted prefix. Stops early as soon as
// a full pass performs no swaps (array already sorted).
// Time: O(n^2) average/worst, O(n) best; Space: O(1). Stable.
void bubbleSort(vector<int>& a) {
    const int n = (int)a.size();
    for (int pass = 0; pass < n - 1; ++pass) {
        bool anySwap = false;
        // After `pass` passes, the last `pass` slots hold their final values.
        for (int k = 0; k + 1 < n - pass; ++k) {
            if (a[k] > a[k + 1]) {
                swap(a[k], a[k + 1]);
                anySwap = true;
            }
        }
        if (!anySwap) return; // no swaps => sorted, skip remaining passes
    }
}
// Demo driver: sorts a small sample and prints it space-separated.
int main() {
    vector<int> values{5, 1, 4, 2, 8};
    bubbleSort(values);
    for (size_t i = 0; i < values.size(); ++i) cout << values[i] << " ";
    return 0;
}
Selection Sort
Find the minimum and place it at the start; fewer writes. C++ Implementation:
// Time: O(n^2) always, Space: O(1)
#include <bits/stdc++.h>
using namespace std;
// Sort `a` ascending by repeatedly selecting the minimum of the
// unsorted suffix and swapping it into the front position.
// Performs at most n-1 swaps (few writes); not stable.
// Time: O(n^2) always; Space: O(1).
void selectionSort(vector<int>& a) {
    const int n = (int)a.size();
    for (int front = 0; front + 1 < n; ++front) {
        // min_element returns the FIRST smallest, matching a strict '<' scan.
        const int best = (int)(min_element(a.begin() + front, a.end()) - a.begin());
        if (best != front) swap(a[front], a[best]);
    }
}
// Demo driver: sorts a small sample and prints it space-separated.
int main() {
    vector<int> values = {64, 25, 12, 22, 11};
    selectionSort(values);
    for (const int& v : values) cout << v << " ";
    return 0;
}
Insertion Sort
Build a sorted prefix; great on small or nearly-sorted data. C++ Implementation:
// Time: O(n^2) average/worst, O(n) best, Space: O(1)
#include <bits/stdc++.h>
using namespace std;
// Sort `a` ascending by growing a sorted prefix one element at a
// time: shift larger entries one slot right, then drop the current
// key into the gap. Stable; very fast on nearly-sorted input.
// Time: O(n^2) average/worst, O(n) best; Space: O(1).
void insertionSort(vector<int>& a) {
    const int n = (int)a.size();
    for (int i = 1; i < n; ++i) {
        const int key = a[i];
        int pos = i;
        // Walk left while the neighbor is strictly greater (keeps stability).
        while (pos > 0 && a[pos - 1] > key) {
            a[pos] = a[pos - 1];
            --pos;
        }
        a[pos] = key;
    }
}
// Demo driver: sorts a small sample and prints it space-separated.
int main() {
    vector<int> values{12, 11, 13, 5, 6};
    insertionSort(values);
    for (size_t i = 0; i < values.size(); ++i) cout << values[i] << " ";
    return 0;
}
Merge Sort
Divide–conquer–merge; predictable O(n log n). C++ Implementation:
// Time: O(n log n) worst, Space: O(n)
#include <bits/stdc++.h>
using namespace std;
// Merge the two adjacent sorted runs a[l..m] and a[m+1..r] (inclusive)
// back into a. Stable: '<=' lets left-run elements win ties.
// Uses O(r - l + 1) temporary storage.
void mergeVec(vector<int>& a, int l, int m, int r) {
    vector<int> left(a.begin() + l, a.begin() + m + 1);
    vector<int> right(a.begin() + m + 1, a.begin() + r + 1);
    size_t i = 0, j = 0;
    int out = l;
    while (i < left.size() && j < right.size()) {
        if (left[i] <= right[j]) a[out++] = left[i++];
        else a[out++] = right[j++];
    }
    // Exactly one of the two runs can have leftovers; copy them through.
    while (i < left.size()) a[out++] = left[i++];
    while (j < right.size()) a[out++] = right[j++];
}
// Recursively sort a[l..r] (inclusive bounds) by divide and conquer:
// sort each half, then merge. Time: O(n log n) worst; Space: O(n).
void mergeSort(vector<int>& a, int l, int r) {
    if (r <= l) return; // zero or one element: already sorted
    const int mid = l + (r - l) / 2; // midpoint without (l + r) overflow
    mergeSort(a, l, mid);
    mergeSort(a, mid + 1, r);
    mergeVec(a, l, mid, r);
}
// Demo driver: sorts a small sample and prints it space-separated.
int main() {
    vector<int> values{12, 11, 13, 5, 6, 7};
    const int last = (int)values.size() - 1;
    mergeSort(values, 0, last);
    for (int v : values) cout << v << " ";
    return 0;
}
Quick Sort
Average O(n log n); pick pivots well to avoid the O(n²) worst case. C++ Implementation:
// Time: O(n log n) avg, O(n^2) worst; Space: O(log n)
#include <bits/stdc++.h>
using namespace std;
// Hoare partition of a[l..r] around the middle element's value.
// Returns an index j with l <= j < r such that every element of
// a[l..j] is <= every element of a[j+1..r]. Note the pivot value is
// NOT guaranteed to end up at index j (unlike Lomuto partitioning).
int partitionHoare(vector<int>& a, int l, int r) {
    const int pivot = a[l + (r - l) / 2];
    int lo = l - 1;
    int hi = r + 1;
    for (;;) {
        // Advance from both ends past elements already on the correct side.
        do { ++lo; } while (a[lo] < pivot);
        do { --hi; } while (a[hi] > pivot);
        if (lo >= hi) return hi; // pointers met or crossed: partition done
        swap(a[lo], a[hi]);
    }
}
// Quick sort a[l..r] (inclusive) with Hoare partitioning. Because the
// partition returns a crossing point (not a placed pivot), recursion
// covers [l..p] and [p+1..r] — the pivot index is NOT excluded.
// Time: O(n log n) average, O(n^2) worst; Space: O(log n) avg stack.
void quickSort(vector<int>& a, int l, int r) {
    if (l >= r) return; // ranges of size 0 or 1 are already sorted
    const int p = partitionHoare(a, l, r);
    quickSort(a, l, p);
    quickSort(a, p + 1, r);
}
// Demo driver: sorts a small sample and prints it space-separated.
int main() {
    vector<int> values = {10, 7, 8, 9, 1, 5};
    quickSort(values, 0, (int)values.size() - 1);
    for (const int& v : values) cout << v << " ";
    return 0;
}
Heap Sort
Max-heap + repeated extraction; O(1) extra memory. C++ Implementation:
// Time: O(n log n), Space: O(1) extra
#include <bits/stdc++.h>
using namespace std;
// Sift the element at index i down so the subtree rooted at i (within
// the first n elements of a) satisfies the max-heap property.
// Assumes both child subtrees are already valid max-heaps.
void heapify(vector<int>& a, int n, int i) {
    int largest = i;
    int l = 2*i + 1, r = 2*i + 2; // children in the implicit binary tree
    if (l < n && a[l] > a[largest]) largest = l;
    if (r < n && a[r] > a[largest]) largest = r;
    if (largest != i) {
        swap(a[i], a[largest]);
        heapify(a, n, largest); // keep sifting the displaced value down
    }
}
// Sort `a` ascending: build a max-heap in place, then repeatedly move
// the maximum (the root) to the end of the shrinking heap region.
// Time: O(n log n); Space: O(1) extra. Not stable.
void heapSort(vector<int>& a) {
    int n = (int)a.size();
    // Build phase: heapify every internal node, bottom-up.
    for (int i = n/2 - 1; i >= 0; --i) heapify(a, n, i);
    // Extraction phase: current max goes to slot i, heap shrinks to size i.
    for (int i = n - 1; i > 0; --i) {
        // BUG FIX: original code was swap(a, a[i]) — swapping the whole
        // vector with an int, which does not compile. Root <-> last element:
        swap(a[0], a[i]);
        heapify(a, i, 0);
    }
}
// Demo driver: sorts a small sample and prints it space-separated.
int main() {
    vector<int> values{12, 11, 13, 5, 6, 7};
    heapSort(values);
    for (size_t i = 0; i < values.size(); ++i) cout << values[i] << " ";
    return 0;
}
Commonly Asked Data Structure Interview Questions on Sorting
Sorting is a fundamental concept in computer science and data structures, often tested in technical interviews. Sorting algorithms are essential for organizing data in a specific order, whether ascending or descending. Understanding various sorting techniques—like Quick Sort, Merge Sort, Bubble Sort, and Insertion Sort—helps solve many problems efficiently.
Top Sorting Interview Questions and Problems
1. What is Bubble Sort, and when would it be used?
Bubble Sort repeatedly swaps adjacent elements if they are in the wrong order. It is simple but inefficient for large datasets with time complexity O(n²); useful mainly for teaching or tiny, nearly-sorted inputs.
2. Explain the concept of Selection Sort.
Selection Sort finds the smallest (or largest) element in the unsorted region and swaps it with the first unsorted position. It performs O(n²) comparisons and is not stable in the standard form.
3. What makes Insertion Sort efficient for small datasets?
Insertion Sort builds the final sorted list one element at a time and runs in O(n) on nearly-sorted data. It has O(n²) worst-case time but very low constant factors and is stable.
4. Difference between Merge Sort and Quick Sort?
Merge Sort is stable and guarantees O(n log n) time with O(n) extra space. Quick Sort is typically faster in practice and sorts in place, averaging O(n log n), but it can degrade to O(n²) with bad pivot choices.
5. What is the time complexity of Radix Sort?
Radix Sort can run in O(nk), where n is the number of elements and k is the number of digits/bits per element, assuming counting by digits/buckets is linear.
6. Why is Quick Sort often faster than Merge Sort in practice?
Quick Sort has good cache locality and typically uses less auxiliary memory by operating in-place, reducing overhead. With randomized or median-of-three pivots, it avoids many worst-case patterns.
7. When is it better to use Heap Sort?
Heap Sort guarantees O(n log n) time and uses O(1) extra space, making it suitable when memory is constrained or worst-case guarantees are required.
8. How does Counting Sort work?
Counting Sort counts occurrences of each key in a known small integer range, computes prefix sums to determine positions, and writes elements into the output in linear time.
9. What is a stable sorting algorithm? Example?
A stable algorithm preserves the relative order of equal keys; for example, Merge Sort is stable when implemented with auxiliary arrays.
10. Worst case for Quick Sort and how to avoid?
Worst case occurs when pivots are consistently min/max, giving O(n²). Use randomized pivot selection or median-of-three to reduce this risk.
11. Explain Bucket Sort and its application.
Bucket Sort partitions input into buckets covering value ranges, sorts each bucket (often with insertion sort), then concatenates; effective for uniform distributions.
12. Can Quick Sort be used on linked lists?
Yes, but pointer-based partitioning is required and Merge Sort is usually preferred for linked lists due to efficient splitting and merging without random access.
13. Significance of the Partition step in Quick Sort?
Partition places the pivot in its final position and ensures left side contains smaller elements and right side larger ones, enabling divide-and-conquer.
14. What is K-way Merge Sort and when is it useful?
K-way merge generalizes merge sort by merging K sorted lists at once; useful for external sorting and large datasets stored on disks.