close

100+ Python AI Programs to Kickstart Your Career in Artificial Intelligence

@mritxperts August 24, 2025 No Comments
100+ Python AI Programs to Kickstart Your Career in Artificial Intelligence

1. Hello AI – Print “Hello AI World”

# Program 1: Hello AI World
# Smoke-test script: prints one greeting to confirm the Python setup works.
print("Hello AI World")

2. Fibonacci Sequence Generator (Recursion vs Iteration)

# Fibonacci two ways: a direct recursive definition and a linear-time loop.

def fib_recursive(n):
    """Return the n-th Fibonacci number (0-indexed) by naive recursion."""
    return n if n <= 1 else fib_recursive(n - 1) + fib_recursive(n - 2)

def fib_iterative(n):
    """Return a list of the first n Fibonacci numbers."""
    seq = []
    current, nxt = 0, 1
    for _ in range(n):
        seq.append(current)
        current, nxt = nxt, current + nxt
    return seq

print("Recursive 5th Fibonacci:", fib_recursive(5))
print("Iterative first 10 Fibonacci:", fib_iterative(10))

3. Tic-Tac-Toe with Minimax AI

# Simple Tic-Tac-Toe with an unbeatable "O" player driven by Minimax.
import math

# All eight three-in-a-row index triples (rows, columns, diagonals).
WIN_LINES = [(0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6)]

def print_board(board):
    """Pretty-print the 3x3 board, one row per line."""
    for r in range(3):
        cells = board[3*r:3*r + 3]
        print('| ' + ' | '.join(cells) + ' |')

def available_moves(board):
    """Indices of every empty square."""
    return [idx for idx, cell in enumerate(board) if cell == " "]

def winner(board):
    """Return 'X' or 'O' if that player completed a line, else None."""
    for a, b, c in WIN_LINES:
        if board[a] != " " and board[a] == board[b] == board[c]:
            return board[a]
    return None

def minimax(board, depth, is_maximizing):
    """Score `board` under perfect play: +1 O wins, -1 X wins, 0 draw."""
    won = winner(board)
    if won == "O":
        return 1
    if won == "X":
        return -1
    if " " not in board:  # no moves left: draw
        return 0
    mark = "O" if is_maximizing else "X"
    pick = max if is_maximizing else min
    best = -math.inf if is_maximizing else math.inf
    for move in available_moves(board):
        board[move] = mark           # play, recurse, undo
        best = pick(best, minimax(board, depth + 1, not is_maximizing))
        board[move] = " "
    return best

def best_move(board):
    """Return the empty square that gives O its best minimax outcome."""
    move, best_score = None, -math.inf
    for candidate in available_moves(board):
        board[candidate] = "O"
        score = minimax(board, 0, False)
        board[candidate] = " "
        if score > best_score:
            best_score, move = score, candidate
    return move

board = [" "]*9
board[0] = "X"  # Example: Human move
ai_move = best_move(board)
board[ai_move] = "O"

print_board(board)

4. Rock-Paper-Scissors AI (Random + Rule-based)

import random

choices = ["rock", "paper", "scissors"]

def rps_ai(user_choice):
    """Return the move that beats `user_choice` (rule-based counter-play)."""
    counters = {"rock": "paper", "paper": "scissors"}
    # Anything else (including "scissors") is answered with rock.
    return counters.get(user_choice, "rock")

user = random.choice(choices)
ai = rps_ai(user)

print(f"User: {user} | AI: {ai}")

5. Number Guessing Game with AI Hints

import random

# Guess-the-number game: the "AI" gives higher/lower hints.
number = random.randint(1, 50)
guess = 0
while guess != number:
    # Robustness fix: a non-numeric entry used to crash with ValueError.
    try:
        guess = int(input("Guess a number (1-50): "))
    except ValueError:
        print("AI Hint: Please enter a whole number.")
        continue
    if guess < number:
        print("AI Hint: Try higher!")
    elif guess > number:
        print("AI Hint: Try lower!")
print("🎉 Correct! The number was", number)

6. Simple Chatbot (If-Else)

import re

def simple_chatbot(user_input):
    """Tiny rule-based chatbot: map trigger words to canned replies.

    Bug fix: the original used substring checks (`"hi" in text`), so
    unrelated words such as "which" or "this" triggered the greeting.
    Inputs are now tokenized and matched as whole words only; "hello"
    and "goodbye" are accepted alongside "hi"/"bye".
    """
    words = set(re.findall(r"[a-z']+", user_input.lower()))
    if words & {"hi", "hello"}:
        return "Hello! How can I help you?"
    elif "weather" in words:
        return "It’s always sunny in AI land 😎"
    elif words & {"bye", "goodbye"}:
        return "Goodbye! Have a great day!"
    else:
        return "I don't understand, but I'm learning!"

print(simple_chatbot("hi"))
print(simple_chatbot("what's the weather?"))

7. Magic 8-ball Predictor

import random

# The five canned fortunes the toy can answer with.
responses = [
    "Yes, definitely!",
    "No, not really.",
    "Maybe, try again later.",
    "It looks promising.",
    "I wouldn’t count on it."
]

def magic_8_ball():
    """Return one canned answer, chosen uniformly at random."""
    return random.choice(responses)

print("🎱 Magic 8-ball says:", magic_8_ball())

8. Spam vs Ham Classifier (Naive Bayes with sample data)

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

# Toy spam filter: bag-of-words counts fed to multinomial Naive Bayes.
train_texts = ["Win money now", "Cheap loans available", "Hello friend", "Let’s catch up soon"]
train_labels = ["spam", "spam", "ham", "ham"]

vectorizer = CountVectorizer()
X = vectorizer.fit_transform(train_texts)

model = MultinomialNB()
model.fit(X, train_labels)

# Classify two unseen messages with the same vocabulary.
test = ["free money", "hi buddy"]
X_test = vectorizer.transform(test)

print(model.predict(X_test))

9. Word Auto-completion (Dictionary-based)

# Prefix-based word completion over a tiny fixed vocabulary.
dictionary = ["apple", "application", "banana", "band", "bandwidth"]

def autocomplete(prefix):
    """Return all dictionary words beginning with `prefix` (case-sensitive)."""
    matches = []
    for entry in dictionary:
        if entry.startswith(prefix):
            matches.append(entry)
    return matches

print(autocomplete("app"))
print(autocomplete("ban"))

10. Predicting Even/Odd Numbers (Trivial AI)

def predict_even_odd(number):
    """Classify an integer as "Even" or "Odd" by its remainder mod 2."""
    if number % 2 == 0:
        return "Even"
    return "Odd"

print("5 is", predict_even_odd(5))
print("10 is", predict_even_odd(10))

Machine Learning Basics Programs (15)

1. Linear Regression – Predict house prices

from sklearn.linear_model import LinearRegression
import numpy as np

# Fit price = f(size) on five points, then predict an unseen size.
sizes = np.array([[1000], [1500], [2000], [2500], [3000]])  # size in sqft
prices = np.array([200000, 300000, 400000, 500000, 600000])  # price

model = LinearRegression()
model.fit(sizes, prices)

print("Prediction for 2200 sqft:", model.predict([[2200]])[0])

2. Logistic Regression – Predict pass/fail

from sklearn.linear_model import LogisticRegression
import numpy as np

# Binary pass/fail from exam marks via logistic regression.
marks = np.array([[35], [50], [65], [75], [90]])  # marks
passed = np.array([0, 0, 1, 1, 1])  # 0 = fail, 1 = pass

model = LogisticRegression()
model.fit(marks, passed)

print("Prediction for 55 marks:", model.predict([[55]])[0])

3. Decision Tree – Predict weather (play/not play)

from sklearn.tree import DecisionTreeClassifier

# Play/don't-play decision learned from (temperature, humidity) pairs.
conditions = [[85, 85], [80, 90], [72, 95], [69, 70], [75, 80]]  # temp, humidity
decisions = ["No", "No", "Yes", "Yes", "Yes"]

model = DecisionTreeClassifier()
model.fit(conditions, decisions)

print("Play decision for (72 temp, 80 humidity):", model.predict([[72, 80]])[0])

4. KNN – Classify fruits

from sklearn.neighbors import KNeighborsClassifier

# Classify a fruit from (weight, size) using its 3 nearest neighbours.
samples = [[150, 7], [170, 7], [140, 6], [130, 6]]  # weight, size
fruits = ["Apple", "Apple", "Orange", "Orange"]

model = KNeighborsClassifier(n_neighbors=3)
model.fit(samples, fruits)

print("Prediction for fruit (160g, size 7):", model.predict([[160, 7]])[0])

5. Random Forest – Student performance prediction

from sklearn.ensemble import RandomForestClassifier

# Pass/fail prediction from marks with an (unseeded) random forest.
marks = [[50], [60], [70], [80], [90]]
outcomes = ["Fail", "Pass", "Pass", "Pass", "Pass"]

model = RandomForestClassifier()
model.fit(marks, outcomes)

print("Prediction for marks 65:", model.predict([[65]])[0])

6. SVM – Classify email spam/ham

from sklearn import svm

# Spam/ham from a single feature: how often "free" appears.
free_counts = [[3], [1], [4], [5], [0]]  # word counts of "free"
categories = ["Spam", "Ham", "Spam", "Spam", "Ham"]

model = svm.SVC()
model.fit(free_counts, categories)

print("Prediction for email with 2 'free' words:", model.predict([[2]])[0])

7. K-Means Clustering – Customer segmentation

from sklearn.cluster import KMeans
import numpy as np

# Customers as (age, income); partition them into two segments.
customers = np.array([[25, 40000], [30, 50000], [35, 60000], [40, 80000], [45, 90000]])

kmeans = KMeans(n_clusters=2, n_init=10)
kmeans.fit(customers)

print("Cluster labels:", kmeans.labels_)

8. Hierarchical Clustering – Group animals by features

from scipy.cluster.hierarchy import linkage, dendrogram
import matplotlib.pyplot as plt

# Agglomerative (Ward) clustering of four 2-D points; show the merge tree.
points = [[1, 1], [1.5, 1.5], [5, 5], [6, 6]]

Z = linkage(points, 'ward')
dendrogram(Z)
plt.show()

9. Polynomial Regression – Predict salary growth

import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression

# Quadratic fit of salary vs. years of experience.
years = np.array([[1], [2], [3], [4], [5]])
salary = np.array([2000, 4000, 7000, 11000, 15000])

poly = PolynomialFeatures(degree=2)
years_poly = poly.fit_transform(years)

model = LinearRegression()
model.fit(years_poly, salary)

print("Prediction for 6 years exp:", model.predict(poly.transform([[6]]))[0])

10. Ridge and Lasso Regression

from sklearn.linear_model import Ridge, Lasso
import numpy as np

X = np.array([[1], [2], [3], [4], [5]])
y = np.array([1, 2, 3, 4, 5])  # perfectly linear: y = x

# Both regularizers shrink coefficients; Lasso (L1) can zero them out.
ridge = Ridge(alpha=1.0).fit(X, y)
lasso = Lasso(alpha=0.1).fit(X, y)

print("Ridge prediction for 6:", ridge.predict([[6]])[0])
print("Lasso prediction for 6:", lasso.predict([[6]])[0])

11. Gradient Descent implementation from scratch

import numpy as np

# Fit y = m*x + b to perfectly linear data with batch gradient descent.
X = np.array([1, 2, 3, 4, 5])
y = np.array([2, 4, 6, 8, 10])  # y = 2x

m, b = 0.0, 0.0  # initial slope and intercept
lr = 0.01        # learning rate

for _ in range(1000):
    y_pred = m * X + b
    error = y - y_pred
    # Gradients of mean squared error w.r.t. m and b. Idiom fix: use
    # vectorized np.mean instead of Python-level sum()/len().
    dm = -2 * np.mean(X * error)
    db = -2 * np.mean(error)
    m -= lr * dm
    b -= lr * db

print("Learned equation: y =", m, "x +", b)

12. Train/test split and cross-validation demo

from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import LinearRegression
import numpy as np

X = np.array([[i] for i in range(1, 11)])
y = np.array([2*i for i in range(1, 11)])

# Hold-out split shown for illustration; the scores below use 5-fold
# cross-validation over the full data instead.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

model = LinearRegression()
scores = cross_val_score(model, X, y, cv=5)

print("Cross-validation scores:", scores)

13. Overfitting vs. underfitting visualization

# Deliberately over-parameterized fit: a degree-8 polynomial through 10
# noisy points hugs the noise instead of the underlying line y = 2x.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

X = np.array([[i] for i in range(1, 11)])
# randint adds noise in [-3, 3); unseeded, so the plot varies per run.
y = np.array([2*i + np.random.randint(-3, 3) for i in range(1, 11)])

poly = PolynomialFeatures(degree=8)
X_poly = poly.fit_transform(X)
model = LinearRegression().fit(X_poly, y)

plt.scatter(X, y, color='blue')  # raw noisy samples
plt.plot(X, model.predict(X_poly), color='red')  # wiggly overfit curve
plt.title("Overfitting Example")
plt.show()

14. Feature scaling (normalization vs. standardization)

from sklearn.preprocessing import MinMaxScaler, StandardScaler
import numpy as np

values = np.array([[10], [20], [30], [40], [50]])

# Min-max rescales to [0, 1]; standardization centers to mean 0, std 1.
print("Normalization:", MinMaxScaler().fit_transform(values).flatten())
print("Standardization:", StandardScaler().fit_transform(values).flatten())

15. Model accuracy comparison

from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
import numpy as np

X = np.array([[i] for i in range(1, 11)])
y = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1, 1])

# Unseeded split, so reported accuracies can vary between runs.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

models = {
    "Logistic Regression": LogisticRegression(),
    "Decision Tree": DecisionTreeClassifier(),
    "KNN": KNeighborsClassifier(),
}

# Fit each classifier on the same split and report hold-out accuracy.
for name, clf in models.items():
    clf.fit(X_train, y_train)
    print(name, "Accuracy:", accuracy_score(y_test, clf.predict(X_test)))

Deep Learning Programs (15)

# 1. Neural Network from scratch (NumPy only)
# A 2-2-1 sigmoid network trained by gradient descent on the XOR table.
# Fix: the training-loop body was not indented in the original, which
# raised IndentationError; correct nesting is restored below.
import numpy as np

X = np.array([[0,0],[0,1],[1,0],[1,1]])
y = np.array([[0],[1],[1],[0]])

np.random.seed(42)          # reproducible weight initialization
W1 = np.random.randn(2,2)   # input -> hidden weights
b1 = np.zeros((1,2))
W2 = np.random.randn(2,1)   # hidden -> output weights
b2 = np.zeros((1,1))

def sigmoid(x):
    """Logistic activation."""
    return 1/(1+np.exp(-x))

def sigmoid_deriv(x):
    """Sigmoid derivative, expressed in terms of the sigmoid's output."""
    return x*(1-x)

for epoch in range(10000):
    # Forward pass.
    h = sigmoid(np.dot(X,W1)+b1)
    out = sigmoid(np.dot(h,W2)+b2)
    loss = np.mean((y-out)**2)
    # Backward pass (mean-squared error, learning rate 0.1).
    d_out = (y-out)*sigmoid_deriv(out)
    d_h = d_out.dot(W2.T)*sigmoid_deriv(h)
    W2 += h.T.dot(d_out)*0.1
    b2 += np.sum(d_out,axis=0,keepdims=True)*0.1
    W1 += X.T.dot(d_h)*0.1
    b1 += np.sum(d_h,axis=0,keepdims=True)*0.1

print("Predictions:", out.round())


# 2. Perceptron Learning Rule Demo
# Learns the AND function with the classic perceptron update rule.
# Fix: both loop bodies were not indented in the original (this raised
# IndentationError); correct nesting is restored below.
import numpy as np

X = np.array([[0,0],[0,1],[1,0],[1,1]])
y = np.array([0,0,0,1])  # AND logic
W = np.zeros(2)
b = 0

for epoch in range(10):
    for xi, target in zip(X, y):
        pred = np.where(np.dot(xi, W) + b >= 0, 1, 0)  # step activation
        # Perceptron rule: nudge weights toward misclassified targets.
        W += (target - pred) * xi
        b += (target - pred)

print("Weights:", W, "Bias:", b)
# 3. XOR problem solved with NN (Keras)
# XOR is not linearly separable; one hidden ReLU layer of 4 units lets a
# sigmoid output learn it.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import numpy as np

X = np.array([[0,0],[0,1],[1,0],[1,1]])
y = np.array([0,1,1,0])

model = Sequential([
    Dense(4,activation='relu',input_dim=2),
    Dense(1,activation='sigmoid')
])
# 500 silent epochs on the four XOR rows.
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
model.fit(X,y,epochs=500,verbose=0)
print("Predictions:", model.predict(X).round())
# 4. MNIST digit recognition
# Dense (non-convolutional) classifier over flattened 28x28 grayscale
# digits; the dataset is downloaded on first run.
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten

(x_train,y_train),(x_test,y_test) = mnist.load_data()
x_train,x_test = x_train/255.0, x_test/255.0  # scale pixels to [0, 1]

model = Sequential([
    Flatten(input_shape=(28,28)),     # 28x28 image -> 784 vector
    Dense(128,activation='relu'),
    Dense(10,activation='softmax')    # one probability per digit class
])
model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])
model.fit(x_train,y_train,epochs=5,validation_split=0.1)
print("Test accuracy:", model.evaluate(x_test,y_test)[1])
# 5. CIFAR-10 classification
# Small CNN over 32x32 RGB images: one conv/pool stage, then dense layers.
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D,MaxPooling2D,Flatten,Dense

(x_train,y_train),(x_test,y_test) = cifar10.load_data()
x_train,x_test = x_train/255.0, x_test/255.0  # scale pixels to [0, 1]

model = Sequential([
    Conv2D(32,(3,3),activation='relu',input_shape=(32,32,3)),
    MaxPooling2D(2,2),
    Flatten(),
    Dense(64,activation='relu'),
    Dense(10,activation='softmax')   # 10 object classes
])
model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])
model.fit(x_train,y_train,epochs=3,validation_split=0.1)
print("Accuracy:", model.evaluate(x_test,y_test)[1])
# 6. Fashion-MNIST classification
# Fix: this snippet used Sequential/Flatten/Dense without importing them
# (they leaked in from earlier examples); imports added so it runs alone.
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense

(x_train,y_train),(x_test,y_test) = fashion_mnist.load_data()
x_train,x_test = x_train/255.0, x_test/255.0  # scale pixels to [0, 1]

model = Sequential([
    Flatten(input_shape=(28,28)),
    Dense(128,activation='relu'),
    Dense(10,activation='softmax')   # 10 clothing categories
])
model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])
model.fit(x_train,y_train,epochs=5,validation_split=0.1)
print("Test Accuracy:", model.evaluate(x_test,y_test)[1])
# 7. Cat vs Dog Classifier (CNN, simplified with TF dataset)
# Fix: Sequential/Conv2D/MaxPooling2D/Flatten/Dense were used without
# being imported in this snippet; imports added so it runs standalone.
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

dataset, info = tfds.load("cats_vs_dogs",as_supervised=True,with_info=True)

def preprocess(img,label):
    """Resize to 128x128 and scale pixels to [0, 1]."""
    return tf.image.resize(img,(128,128))/255.0,label

train = dataset['train'].map(preprocess).batch(32)

model = Sequential([
    Conv2D(32,(3,3),activation='relu',input_shape=(128,128,3)),
    MaxPooling2D(2,2),
    Flatten(),
    Dense(64,activation='relu'),
    Dense(1,activation='sigmoid')   # binary output: cat vs dog
])
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
model.fit(train,epochs=1,steps_per_epoch=100) # quick demo
# 8. Sentiment Analysis on IMDB dataset (LSTM)
# Binary sentiment classifier: top-10k vocabulary, reviews padded or
# truncated to 200 tokens, a single LSTM layer, sigmoid output.
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding,LSTM,Dense

(x_train,y_train),(x_test,y_test) = imdb.load_data(num_words=10000)
x_train = pad_sequences(x_train,maxlen=200)
x_test = pad_sequences(x_test,maxlen=200)

model = Sequential([
    Embedding(10000,128,input_length=200),
    LSTM(64),
    Dense(1,activation='sigmoid')   # P(positive review)
])
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
model.fit(x_train,y_train,epochs=2,batch_size=64,validation_split=0.1)
print("Accuracy:", model.evaluate(x_test,y_test)[1])
# 9. Stock price prediction with LSTM (dummy data)
# Sliding-window regression: 20 past samples of a sine wave predict the
# next value (the sine stands in for a real price series).
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM,Dense

data = np.sin(np.arange(1000)) # fake stock trend
X,y = [],[]
for i in range(20,len(data)):
    X.append(data[i-20:i])   # 20-step history window
    y.append(data[i])        # next value to predict
X,y = np.array(X),np.array(y)
X = X.reshape((X.shape[0],X.shape[1],1))  # (samples, timesteps, features)

model = Sequential([LSTM(50,input_shape=(20,1)),Dense(1)])
model.compile(optimizer='adam',loss='mse')
model.fit(X,y,epochs=5,verbose=1)
# 10. Image Colorization with Autoencoder (simplified)
# Fix: Conv2D/Flatten/Dense were used without being imported in this
# snippet; imports added so the example runs standalone.
from tensorflow.keras.layers import Conv2DTranspose,Reshape,Input
from tensorflow.keras.layers import Conv2D, Flatten, Dense
from tensorflow.keras.models import Model

# Grayscale 28x28 in, RGB 28x28 out via a dense bottleneck.
inp = Input((28,28,1))
x = Conv2D(32,(3,3),activation='relu',padding='same')(inp)
x = Flatten()(x)
x = Dense(28*28*3,activation='sigmoid')(x)
out = Reshape((28,28,3))(x)

model = Model(inp,out)
model.compile(optimizer='adam',loss='mse')
print(model.summary())
# 11. Noise Reduction Autoencoder
# Fix: Input and Model came from a previous snippet's imports; they are
# imported here so the example is self-contained.
from tensorflow.keras.layers import Conv2D,UpSampling2D,MaxPooling2D
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

# Encoder: two conv+pool stages compress 28x28x1 down to 7x7x32.
inp = Input((28,28,1))
x = Conv2D(32,(3,3),activation='relu',padding='same')(inp)
x = MaxPooling2D((2,2),padding='same')(x)
x = Conv2D(32,(3,3),activation='relu',padding='same')(x)
encoded = MaxPooling2D((2,2),padding='same')(x)

# Decoder: mirror of the encoder, upsampling back to 28x28x1.
x = Conv2D(32,(3,3),activation='relu',padding='same')(encoded)
x = UpSampling2D((2,2))(x)
x = Conv2D(32,(3,3),activation='relu',padding='same')(x)
x = UpSampling2D((2,2))(x)
decoded = Conv2D(1,(3,3),activation='sigmoid',padding='same')(x)

autoencoder = Model(inp,decoded)
autoencoder.compile(optimizer='adam',loss='binary_crossentropy')
print(autoencoder.summary())
# 12. Transfer Learning with ResNet50
# Fix: Sequential/Flatten/Dense were not imported in this snippet;
# imports added so the example runs standalone.
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense

base = ResNet50(weights='imagenet',include_top=False,input_shape=(224,224,3))
# Freeze the pretrained backbone; only the new 10-class head will train.
for layer in base.layers: layer.trainable=False
model = Sequential([base,Flatten(),Dense(10,activation='softmax')])
model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])
print(model.summary())
# 13. Object Detection (YOLOv8 with Ultralytics)
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # pretrained nano model (auto-downloads)
results = model("https://ultralytics.com/images/bus.jpg")
# Fix: model() returns a list of Results (one per image); the list itself
# has no .show() — display the first (and only) result instead.
results[0].show()
# 14. Image Caption Generator (simplified, placeholder)
# No runnable model here — captioning needs a large paired image/text
# corpus; this only prints the standard CNN+LSTM pipeline outline.
print("Requires large dataset (COCO). Example pipeline:")
print("1. Extract CNN features (InceptionV3).")
print("2. Train LSTM on captions.")
print("3. Combine CNN+LSTM for caption generation.")
# 15. GAN (Generate handwritten digits)
# Minimal GAN wiring: the generator maps 100-d noise to a 28x28 image and
# the discriminator scores real vs. fake. No training loop is included.
import tensorflow as tf
from tensorflow.keras.layers import Dense,LeakyReLU,Reshape,Flatten
from tensorflow.keras.models import Sequential

# Generator
gen = Sequential([
    Dense(128,input_dim=100),LeakyReLU(0.2),
    Dense(784,activation='tanh'),Reshape((28,28,1))  # tanh output in [-1, 1]
])

# Discriminator
disc = Sequential([
    Flatten(input_shape=(28,28,1)),
    Dense(128),LeakyReLU(0.2),
    Dense(1,activation='sigmoid')   # probability the input is real
])

disc.compile(optimizer='adam',loss='binary_crossentropy')
# Freeze the discriminator inside the combined model so that training
# the GAN stack only updates the generator.
disc.trainable=False
gan = Sequential([gen,disc])
gan.compile(optimizer='adam',loss='binary_crossentropy')

print("GAN built. Train on MNIST for real results.")

Natural Language Processing (NLP) (15 Programs)


1. Tokenization & Stopword Removal

import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

nltk.download("punkt")
nltk.download("stopwords")

# Lowercase, tokenize, then drop punctuation and English stopwords.
text = "Itxperts provides the best website development services in India."
tokens = word_tokenize(text.lower())
stop_words = stopwords.words("english")
filtered = [tok for tok in tokens if tok.isalpha() and tok not in stop_words]

print("Original:", tokens)
print("After stopword removal:", filtered)

2. Word Frequency Counter

from collections import Counter

# Whitespace tokenization keeps punctuation attached (e.g. "developers.").
text = "ChatGPT helps developers. Developers use ChatGPT for coding help."
freq = Counter(text.lower().split())
print(freq)

3. Bag-of-Words Model for Text Classification

from sklearn.feature_extraction.text import CountVectorizer

# Each document becomes a row of word counts over the shared vocabulary.
corpus = ["I love coding", "Coding is fun", "I dislike bugs"]
vectorizer = CountVectorizer()
bow = vectorizer.fit_transform(corpus)

print("Features:", vectorizer.get_feature_names_out())
print("Bag of Words Matrix:\n", bow.toarray())

4. TF-IDF Implementation

from sklearn.feature_extraction.text import TfidfVectorizer

# TF-IDF downweights terms that appear in many documents.
corpus = ["ChatGPT is amazing", "ChatGPT helps in AI", "AI is the future"]
vectorizer = TfidfVectorizer()
tfidf = vectorizer.fit_transform(corpus)

print("Features:", vectorizer.get_feature_names_out())
print("TF-IDF Matrix:\n", tfidf.toarray())

5. Sentiment Analysis using TextBlob

from textblob import TextBlob

# Polarity ranges from -1 (negative) to +1 (positive).
text = "I really love the services of Itxperts!"
polarity = TextBlob(text).sentiment.polarity
print("Sentiment polarity:", polarity)

6. Named Entity Recognition (spaCy)

import spacy

# Small English pipeline (must be installed: python -m spacy download).
nlp = spacy.load("en_core_web_sm")

text = "Elon Musk founded SpaceX in California."
doc = nlp(text)

# Print each recognized entity with its label (PERSON, ORG, GPE, ...).
for ent in doc.ents:
    print(ent.text, ent.label_)

7. Text Summarizer (Extractive)

import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from collections import Counter

nltk.download("punkt")

text = """Itxperts is a leading web development company. 
We create websites, apps, and AI solutions for businesses worldwide. 
Our mission is to empower businesses through technology."""

# Frequency-based extractive summary: score each sentence by the summed
# corpus frequency of its words, then keep the two best sentences.
sentences = sent_tokenize(text)
freq = Counter(word_tokenize(text.lower()))

scores = {}
for sent in sentences:
    scores[sent] = sum(freq[w] for w in word_tokenize(sent.lower()) if w in freq)

summary = sorted(scores, key=scores.get, reverse=True)[:2]
print("Summary:", " ".join(summary))

8. Fake News Detection (ML Model)

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

# Two-document toy corpus: TF-IDF features into Naive Bayes.
train_docs = ["This is a real news article", "Breaking!!! Aliens landed on Earth"]
train_labels = [1, 0]  # 1=real, 0=fake

vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(train_docs)

model = MultinomialNB()
model.fit(X_train, train_labels)

test = ["Aliens are coming tomorrow!"]
print("Prediction (1=real,0=fake):", model.predict(vectorizer.transform(test))[0])

9. Chatbot with RNN/LSTM

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

# Skeleton sequence model: 1000-word vocab embeddings -> LSTM -> sigmoid.
# Only the architecture is built here; no data or training loop.
model = Sequential()
model.add(Embedding(1000, 64))
model.add(LSTM(64))
model.add(Dense(1, activation="sigmoid"))

model.compile(optimizer="adam", loss="binary_crossentropy")
print("Chatbot RNN/LSTM model created!")

10. Transformer Model with Hugging Face

from transformers import pipeline

# Default extractive QA pipeline (downloads a pretrained model on first use).
qa = pipeline("question-answering")
context = "Itxperts is a company that builds websites and AI solutions."
question = "What does Itxperts build?"
answer = qa(question=question, context=context)
print(answer)

11. Next-Word Prediction (Markov Chains)

import random

text = "I love coding in Python. Python is great for machine learning."
words = text.split()

# First-order Markov chain: each word maps to every word that follows it.
markov = {}
for w1, w2 in zip(words, words[1:]):
    markov.setdefault(w1, []).append(w2)

# Random walk of 5 steps starting from "Python".
word = "Python"
for _ in range(5):
    next_word = random.choice(markov.get(word, ["END"]))
    print(word, end=" ")
    word = next_word

12. Topic Modeling (LDA)

from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer

corpus = ["AI is the future", "I love AI and machine learning", "Politics and government news"]
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(corpus)

# Two latent topics; fixed random_state keeps the decomposition stable.
lda = LatentDirichletAllocation(n_components=2, random_state=0)
lda.fit(X)

# For each topic, show the three highest-weight vocabulary terms.
vocab = vectorizer.get_feature_names_out()
for idx, topic in enumerate(lda.components_):
    top_terms = [vocab[i] for i in topic.argsort()[-3:]]
    print("Topic", idx, top_terms)

13. Question-Answering System

from transformers import pipeline

# Extractive QA: the answer span is pulled straight from the context.
qa = pipeline("question-answering")
context = "Barack Obama was the 44th president of the United States."
question = "Who was the 44th president of the USA?"
answer = qa(question=question, context=context)
print(answer)

14. Language Translation (Seq2Seq Model)

from transformers import pipeline

# English -> French translation via a pretrained seq2seq model.
en_to_fr = pipeline("translation_en_to_fr")
print(en_to_fr("Itxperts develops modern websites", max_length=40))

15. Speech-to-Text & Text-to-Speech

import pyttsx3
import speech_recognition as sr

# Text-to-Speech
engine = pyttsx3.init()
engine.say("Hello, welcome to Itxperts!")
engine.runAndWait()

# Speech-to-Text
r = sr.Recognizer()
with sr.Microphone() as source:
    print("Speak something...")
    audio = r.listen(source)
    try:
        print("You said:", r.recognize_google(audio))
    # Fix: a bare `except:` also swallowed KeyboardInterrupt and real
    # bugs; catch only the errors the recognizer actually raises.
    except sr.UnknownValueError:
        print("Could not understand audio")
    except sr.RequestError:
        print("Could not understand audio")

Computer Vision Programs (15 with Code)

1. Face Detection with OpenCV

import cv2

# Load pre-trained face detector
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
cap = cv2.VideoCapture(0)  # default webcam

while True:
    ret, frame = cap.read()
    # NOTE(review): `ret` is unchecked — with no camera, `frame` is None
    # and cvtColor below fails.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # scaleFactor, minNeighbors
    
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)  # blue box per face
    
    cv2.imshow('Face Detection', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # quit on 'q'
        break

cap.release()
cv2.destroyAllWindows()

2. Smile Detector with OpenCV

import cv2

# Two Haar cascades: one finds faces, the second finds smiles inside each face.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
smile_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_smile.xml")

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    for (x, y, w, h) in faces:
        roi_gray = gray[y:y+h, x:x+w]    # search for smiles only inside the face
        roi_color = frame[y:y+h, x:x+w]
        # Stricter parameters (1.8, 20) cut down false smile detections.
        smiles = smile_cascade.detectMultiScale(roi_gray, 1.8, 20)
        
        for (sx, sy, sw, sh) in smiles:
            cv2.rectangle(roi_color, (sx, sy), (sx+sw, sy+sh), (0, 255, 0), 2)

    cv2.imshow('Smile Detector', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

3. Age & Gender Prediction from Images

(Uses a pre-trained deep learning model from OpenCV)

import cv2

# Load models
# NOTE(review): these four files are pretrained Caffe age/gender nets that
# must be downloaded separately; paths are relative to the working dir.
age_proto = "deploy_age.prototxt"
age_model = "age_net.caffemodel"
gender_proto = "deploy_gender.prototxt"
gender_model = "gender_net.caffemodel"

age_net = cv2.dnn.readNet(age_model, age_proto)
gender_net = cv2.dnn.readNet(gender_model, gender_proto)

# Per-channel means subtracted by blobFromImage before inference.
MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
age_list = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
gender_list = ['Male', 'Female']

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    # The whole frame (no face cropping) is resized to 227x227 and fed in.
    blob = cv2.dnn.blobFromImage(frame, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
    
    gender_net.setInput(blob)
    gender = gender_list[gender_net.forward().argmax()]
    
    age_net.setInput(blob)
    age = age_list[age_net.forward().argmax()]
    
    cv2.putText(frame, f"{gender}, {age}", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
    cv2.imshow("Age & Gender Prediction", frame)
    
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

4. Real-time Face Recognition

# pip install face_recognition opencv-python
import face_recognition, cv2

video = cv2.VideoCapture(0)

# One reference face whose encoding every frame is compared against.
known_img = face_recognition.load_image_file("known.jpg")    # supply a face image
known_enc = face_recognition.face_encodings(known_img)[0]

while True:
    ok, frame = video.read()
    if not ok: break
    # Fix: frame[:, :, ::-1] produces a negative-stride view that dlib
    # can reject; cvtColor returns a contiguous RGB copy instead.
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    locs = face_recognition.face_locations(rgb)
    encs = face_recognition.face_encodings(rgb, locs)

    for (top, right, bottom, left), enc in zip(locs, encs):
        match = face_recognition.compare_faces([known_enc], enc)[0]
        name = "Known" if match else "Unknown"
        cv2.rectangle(frame, (left, top), (right, bottom), (0,255,0), 2)
        cv2.putText(frame, name, (left, top-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,255,0), 2)

    cv2.imshow("Face Recognition", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"): break

video.release(); cv2.destroyAllWindows()

5. Eye Blink Detection (Drowsiness Detection)

# pip install dlib imutils scipy opencv-python
import cv2, dlib
from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    """Eye aspect ratio: (sum of the two vertical gaps) / (2 * eye width).

    `eye` holds six (x, y) landmarks; low values indicate a closed eye.
    """
    vertical = dist.euclidean(eye[1], eye[5]) + dist.euclidean(eye[2], eye[4])
    horizontal = dist.euclidean(eye[0], eye[3])
    return vertical / (2.0 * horizontal)

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")  # download file

# An EAR below 0.25 for 3 consecutive frames is flagged as drowsiness.
EYE_AR_THRESH, EYE_AR_CONSEC_FRAMES = 0.25, 3
counter = 0  # consecutive low-EAR frames seen so far

cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok: break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for rect in detector(gray):
        shape = predictor(gray, rect)
        points = [(shape.part(i).x, shape.part(i).y) for i in range(68)]

        # Landmarks 36-41 outline one eye; only this eye is checked here.
        left = [points[i] for i in [36,37,38,39,40,41]]
        ear = eye_aspect_ratio(left)
        if ear < EYE_AR_THRESH:
            counter += 1
            if counter >= EYE_AR_CONSEC_FRAMES:
                cv2.putText(frame, "DROWSY!", (30,60), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0,0,255), 3)
        else:
            counter = 0  # eye open again: reset the streak

    cv2.imshow("Blink / Drowsiness", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'): break

cap.release(); cv2.destroyAllWindows()

6. Object Tracking (OpenCV)

# OpenCV built-in trackers: "CSRT" is accurate
import cv2

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
# The user draws the initial bounding box on the first frame.
bbox = cv2.selectROI("Select Object", frame, False, False)
# NOTE(review): cv2.legacy trackers require the opencv-contrib build.
tracker = cv2.legacy.TrackerCSRT_create()
tracker.init(frame, bbox)

while True:
    ok, frame = cap.read()
    if not ok: break
    ok, box = tracker.update(frame)  # ok=False when the target is lost
    if ok:
        x, y, w, h = [int(v) for v in box]
        cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 2)
    else:
        cv2.putText(frame, "Tracking lost", (20,40), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,255), 2)
    cv2.imshow("Object Tracking (CSRT)", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'): break

cap.release(); cv2.destroyAllWindows()

7. Background Removal (Segmentation via GrabCut)

import cv2, numpy as np

# GrabCut foreground extraction: initialize with a rectangle just inside
# the image border, run 5 iterations, then zero out background pixels.
img = cv2.imread("person.jpg")  # subject against relatively simple background
mask = np.zeros(img.shape[:2], np.uint8)
bgModel = np.zeros((1,65), np.float64)
fgModel = np.zeros((1,65), np.float64)

rect = (10, 10, img.shape[1]-20, img.shape[0]-20)  # initial rectangle
cv2.grabCut(img, mask, rect, bgModel, fgModel, 5, cv2.GC_INIT_WITH_RECT)

# Labels 0/2 are (probable) background -> 0; everything else -> 1.
keep = ((mask != 2) & (mask != 0)).astype('uint8')
result = img * keep[:, :, np.newaxis]
cv2.imwrite("foreground.png", result)
print("Saved: foreground.png")

8. QR Code & Barcode Scanner

# Option A: OpenCV QRCodeDetector (QR only)
import cv2
detector = cv2.QRCodeDetector()
cap = cv2.VideoCapture(0)

while True:
    ok, frame = cap.read()
    if not ok: break
    # detectAndDecode returns (text, corner_points, rectified_qr);
    # `data` is an empty string when no QR code is found.
    data, pts, _ = detector.detectAndDecode(frame)
    if data:
        cv2.putText(frame, f"QR: {data}", (20,40), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,255,0), 2)
    cv2.imshow("QR Scanner", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'): break

cap.release(); cv2.destroyAllWindows()

# Option B (barcodes too): pip install pyzbar
# from pyzbar.pyzbar import decode; for obj in decode(frame): print(obj.data.decode())

9. Hand Gesture Recognition (Counting Fingers with MediaPipe)

# pip install mediapipe opencv-python
import cv2, mediapipe as mp

hands = mp.solutions.hands.Hands(min_detection_confidence=0.5, min_tracking_confidence=0.5)
draw = mp.solutions.drawing_utils

cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok: break
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB input
    res = hands.process(rgb)
    fingers_up = 0

    if res.multi_hand_landmarks:
        for handLms in res.multi_hand_landmarks:
            lm = handLms.landmark
            # Simple heuristic: tip above pip for fingers (index–pinky). Thumb uses x-coord.
            tip_ids = [4, 8, 12, 16, 20]
            # NOTE(review): the thumb rule assumes a right hand facing the
            # camera; it inverts for a left hand or a mirrored frame.
            if lm[tip_ids[0]].x < lm[tip_ids[0]-1].x: fingers_up += 1  # thumb (for right hand)
            for i in range(1,5):
                if lm[tip_ids[i]].y < lm[tip_ids[i]-2].y: fingers_up += 1
            draw.draw_landmarks(frame, handLms, mp.solutions.hands.HAND_CONNECTIONS)

    cv2.putText(frame, f"Fingers: {fingers_up}", (20,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2)
    cv2.imshow("Hand Gesture (MediaPipe)", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'): break

cap.release(); cv2.destroyAllWindows()

10. Emotion Detection from Face

# pip install fer opencv-python
from fer import FER
import cv2

detector = FER()  # uses MTCNN/haar internally if available
cap = cv2.VideoCapture(0)

while True:
    ok, frame = cap.read()
    if not ok: break
    # Each result: {"box": (x, y, w, h), "emotions": {name: score, ...}}
    results = detector.detect_emotions(frame)
    for r in results:
        (x,y,w,h) = r["box"]
        # Label the face with its single highest-scoring emotion.
        emotion, score = max(r["emotions"].items(), key=lambda x: x[1])
        cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
        cv2.putText(frame, f"{emotion} {score:.2f}", (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8,(0,255,0),2)

    cv2.imshow("Emotion Detection", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'): break

cap.release(); cv2.destroyAllWindows()

11. OCR (Optical Character Recognition) with Tesseract

# pip install pytesseract pillow opencv-python
# Install Tesseract engine separately and set the path if required.
# Reads an image, binarizes it, and prints the text Tesseract extracts.
import cv2, pytesseract

img = cv2.imread("text_image.png")
if img is None:
    # cv2.imread returns None (no exception) when the file is missing or
    # unreadable; fail fast instead of crashing inside cvtColor.
    raise FileNotFoundError("text_image.png not found or unreadable")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Otsu thresholding picks a global binarization threshold automatically,
# which typically improves OCR accuracy on clean documents.
gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
text = pytesseract.image_to_string(gray, lang="eng")
print(text)

12. License Plate Recognition System (Simple)

# pip install imutils pytesseract opencv-python
# You may also use a pretrained cascade for plates or YOLO for better detection.
# Finds a rectangular contour that looks like a license plate and OCRs it.
import cv2, pytesseract

img = cv2.imread("car.jpg")
if img is None:
    # cv2.imread returns None rather than raising when the file is missing;
    # fail fast with a clear message instead of crashing inside cvtColor.
    raise FileNotFoundError("car.jpg not found or unreadable")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Bilateral filter removes noise while keeping edges sharp for Canny.
gray = cv2.bilateralFilter(gray, 11, 17, 17)
edges = cv2.Canny(gray, 30, 200)

# Keep only the 10 largest contours; a plate is usually a large region.
contours, _ = cv2.findContours(edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]

plate = None
for c in contours:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.018 * peri, True)
    if len(approx) == 4:
        # First contour that approximates to 4 corners is assumed to be the plate.
        x,y,w,h = cv2.boundingRect(approx)
        plate = img[y:y+h, x:x+w]
        break

if plate is not None:
    plate_gray = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY)
    # --psm 7 tells Tesseract to treat the crop as a single line of text.
    text = pytesseract.image_to_string(plate_gray, config="--psm 7")
    print("Plate:", text.strip())
else:
    print("Plate not found")

13. Lane Detection for Self-Driving Cars

import cv2, numpy as np

def region_of_interest(img):
    """Black out everything except a triangular road-ahead region of *img*."""
    height, width = img.shape[:2]
    roi_mask = np.zeros_like(img)
    # Triangle: bottom-left corner, a point ~60% up at the center, bottom-right corner.
    triangle = np.array([[(0, height), (width//2, int(height*0.6)), (width, height)]], dtype=np.int32)
    cv2.fillPoly(roi_mask, triangle, 255)
    return cv2.bitwise_and(img, roi_mask)

cap = cv2.VideoCapture("road.mp4")  # or 0 for webcam

while True:
    grabbed, frame = cap.read()
    if not grabbed:
        break
    # Edge pipeline: grayscale -> blur -> Canny -> restrict to road triangle.
    blurred = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (5,5), 0)
    masked_edges = region_of_interest(cv2.Canny(blurred, 50, 150))
    segments = cv2.HoughLinesP(masked_edges, 1, np.pi/180, threshold=50, minLineLength=50, maxLineGap=150)
    if segments is not None:
        for seg in segments:
            x1, y1, x2, y2 = seg[0]
            cv2.line(frame, (x1,y1), (x2,y2), (0,255,0), 4)
    cv2.imshow("Lane Detection", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

14. Pose Estimation (MediaPipe)

# pip install mediapipe opencv-python
# Draws full-body pose landmarks on a live webcam feed with MediaPipe.
import cv2, mediapipe as mp

pose_model = mp.solutions.pose.Pose()
drawer = mp.solutions.drawing_utils

camera = cv2.VideoCapture(0)
while True:
    grabbed, frame = camera.read()
    if not grabbed:
        break
    # MediaPipe expects RGB input; OpenCV captures BGR.
    result = pose_model.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    if result.pose_landmarks:
        drawer.draw_landmarks(frame, result.pose_landmarks, mp.solutions.pose.POSE_CONNECTIONS)
    cv2.imshow("Pose Estimation", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

camera.release()
cv2.destroyAllWindows()

15. Image Similarity Detection (Feature Matching with ORB)

import cv2

# Compare two images by matching ORB features; a lower average Hamming
# distance over the best matches means the images are more similar.
img1 = cv2.imread("image1.jpg", 0)
img2 = cv2.imread("image2.jpg", 0)
if img1 is None or img2 is None:
    # cv2.imread returns None (no exception) for missing/unreadable files.
    raise FileNotFoundError("image1.jpg / image2.jpg not found or unreadable")

orb = cv2.ORB_create(1000)
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
if des1 is None or des2 is None:
    # detectAndCompute yields None descriptors for featureless images;
    # BFMatcher.match would fail on None input.
    raise ValueError("No ORB features detected in one of the images")

# Hamming norm is the correct metric for ORB's binary descriptors.
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)

# Average distance over the best (up to) 50 matches; max(1, ...) guards
# against division by zero when there are no matches at all.
top = matches[:50]
similarity_score = sum(m.distance for m in top) / max(1, len(top))
print("Lower is more similar. ORB score:", similarity_score)

matched = cv2.drawMatches(img1, kp1, img2, kp2, top, None, flags=2)
cv2.imshow("Matches", matched); cv2.waitKey(0); cv2.destroyAllWindows()

Notes & Setup Tips

  • Some programs need extra assets (e.g., known.jpg, person.jpg, car.jpg, road.mp4) and model files (shape_predictor_68_face_landmarks.dat, age/gender Caffe models).
  • For Tesseract OCR, install the Tesseract engine and ensure it’s on your system PATH (or set pytesseract.pytesseract.tesseract_cmd).
  • The tracker constructor's location depends on your OpenCV version: older builds expose cv2.TrackerCSRT_create(), while newer builds move it under the legacy namespace as cv2.legacy.TrackerCSRT_create() — adjust the call accordingly.

Leave a Reply