Diffstat (limited to 'lib/main.dart')
-rw-r--r--  lib/main.dart  241
1 file changed, 241 insertions, 0 deletions
diff --git a/lib/main.dart b/lib/main.dart
new file mode 100644
index 0000000..6480384
--- /dev/null
+++ b/lib/main.dart
@@ -0,0 +1,241 @@
+import 'dart:io';
+
+import 'package:camera/camera.dart';
+import 'package:flutter/material.dart';
+import 'package:flutter_tts/flutter_tts.dart';
+import 'package:google_mlkit_text_recognition/google_mlkit_text_recognition.dart';
+import 'package:ocr/result_screen.dart';
+import 'package:permission_handler/permission_handler.dart';
+
+void main() {
+ runApp(const App());
+}
+
+class App extends StatelessWidget {
+ const App({super.key});
+
+ @override
+ Widget build(BuildContext context) {
+ return MaterialApp(
+ title: 'MegaView Text',
+ theme: ThemeData(
+ colorScheme: ColorScheme.fromSeed(seedColor: Colors.deepPurpleAccent),
+ useMaterial3: true,
+ ),
+ debugShowCheckedModeBanner: false,
+ home: const MainScreen(),
+ );
+ }
+}
+
+class MainScreen extends StatefulWidget {
+ const MainScreen({super.key});
+
+ @override
+ State<MainScreen> createState() => _MainScreenState();
+}
+
+class _MainScreenState extends State<MainScreen> with WidgetsBindingObserver {
+ bool _isPermissionGranted = false;
+
+ late final Future<void> _future;
+ CameraController? _cameraController;
+
+ final textRecognizer = TextRecognizer();
+
+  final FlutterTts flutterTts = FlutterTts(); // Text-to-speech engine used to read recognized text aloud.
+
+ @override
+ void initState() {
+ super.initState();
+    initTTS(); // Configure text-to-speech before first use.
+
+ WidgetsBinding.instance.addObserver(this);
+
+ _future = _requestCameraPermission();
+ }
+
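+  /// Configures language, speech rate, volume, and pitch on the flutter_tts engine.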
+  Future<void> initTTS() async {
+    await flutterTts.setLanguage("en-US"); // Language used for speech output.
+    await flutterTts.setSpeechRate(0.5); // Half the default rate; 1.0 is often too fast to follow.
+    await flutterTts.setVolume(1.0); // Volume, from 0.0 (silent) to 1.0 (maximum).
+    await flutterTts.setPitch(1.0); // Pitch; 1.0 is the engine default.
+
+    // Other flutter_tts options can be configured here as needed. To verify
+    // that the requested language is available on the device:
+    // final isAvailable = await flutterTts.isLanguageAvailable("en-US");
+    // debugPrint("TTS is available: $isAvailable");
+  }
+
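+  /// Reads [text] aloud using the settings applied in initTTS().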
+ Future<void> speak(String text) async {
+ await flutterTts.speak(text); // TTS
+ }
+
+ @override
+ void dispose() {
+ WidgetsBinding.instance.removeObserver(this);
+ _stopCamera();
+ textRecognizer.close();
+    flutterTts.stop(); // Stop any in-progress speech.
+ super.dispose();
+ }
+
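+  // Stop the camera when the app is backgrounded and restart it on resume.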
+ @override
+ void didChangeAppLifecycleState(AppLifecycleState state) {
+ if (_cameraController == null || !_cameraController!.value.isInitialized) {
+ return;
+ }
+
+ if (state == AppLifecycleState.inactive) {
+ _stopCamera();
+    } else if (state == AppLifecycleState.resumed) {
+      // The guard above already ensures the controller exists and is initialized.
+      _startCamera();
+    }
+ }
+
+ @override
+ Widget build(BuildContext context) {
+ return FutureBuilder(
+ future: _future,
+ builder: (context, snapshot) {
+ return Stack(
+ children: [
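+            // Show the live camera preview only once permission is granted.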
+ if (_isPermissionGranted)
+ FutureBuilder<List<CameraDescription>>(
+ future: availableCameras(),
+ builder: (context, snapshot) {
+ if (snapshot.hasData) {
+ _initCameraController(snapshot.data!);
+
+ return Center(child: CameraPreview(_cameraController!));
+ } else {
+ return const LinearProgressIndicator();
+ }
+ },
+ ),
+ Scaffold(
+ appBar: AppBar(
+ backgroundColor: Theme.of(context).colorScheme.inversePrimary,
+ title: const Text('MegaView Text Recognition'),
+ ),
+ backgroundColor: _isPermissionGranted ? Colors.transparent : null,
+ body: _isPermissionGranted
+ ? Column(
+ children: [
+                        const Spacer(),
+ Container(
+ padding: const EdgeInsets.only(bottom: 13.0),
+ child: Center(
+ child: ElevatedButton(
+ onPressed: _scanImage,
+ child: const Text('Scan text'),
+ ),
+ ),
+ ),
+ ],
+ )
+ : Center(
+ child: Container(
+ padding: const EdgeInsets.only(left: 24.0, right: 24.0),
+ child: const Text(
+ 'Camera permission denied',
+ textAlign: TextAlign.center,
+ ),
+ ),
+ ),
+ ),
+ ],
+ );
+ },
+ );
+ }
+
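+  /// Asks for camera permission and records whether it was granted.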
+ Future<void> _requestCameraPermission() async {
+ final status = await Permission.camera.request();
+ _isPermissionGranted = status == PermissionStatus.granted;
+ }
+
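+  /// Restarts the preview with the camera that was previously in use.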
+ void _startCamera() {
+ if (_cameraController != null) {
+ _cameraSelected(_cameraController!.description);
+ }
+ }
+
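+  /// Releases the camera controller's resources.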
+  void _stopCamera() {
+    _cameraController?.dispose();
+  }
+
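+  /// Picks the first back-facing camera and starts it. Called from build(),
+  /// so it returns early once a controller already exists.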
+ void _initCameraController(List<CameraDescription> cameras) {
+ if (_cameraController != null) {
+ return;
+ }
+
+ // Select the first rear camera.
+ CameraDescription? camera;
+ for (var i = 0; i < cameras.length; i++) {
+ final CameraDescription current = cameras[i];
+ if (current.lensDirection == CameraLensDirection.back) {
+ camera = current;
+ break;
+ }
+ }
+
+ if (camera != null) {
+ _cameraSelected(camera);
+ }
+ }
+
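+  /// Creates and initializes a controller for [camera], disables the flash,
+  /// and rebuilds so the preview appears.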
+ Future<void> _cameraSelected(CameraDescription camera) async {
+ _cameraController = CameraController(
+ camera,
+ ResolutionPreset.high,
+ enableAudio: false,
+ );
+
+ await _cameraController!.initialize();
+ await _cameraController!.setFlashMode(FlashMode.off);
+
+ if (!mounted) {
+ return;
+ }
+ setState(() {});
+ }
+
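+  /// Captures a photo, runs ML Kit text recognition on it, reads the result
+  /// aloud, and pushes the ResultScreen with the recognized text.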
+ Future<void> _scanImage() async {
+ if (_cameraController == null) return;
+
+    // Grab context-dependent helpers before any async gap.
+    final navigator = Navigator.of(context);
+    final scaffoldMessenger = ScaffoldMessenger.of(context);
+
+ try {
+ final pictureFile = await _cameraController!.takePicture();
+
+ final file = File(pictureFile.path);
+
+ final inputImage = InputImage.fromFile(file);
+ final recognizedText = await textRecognizer.processImage(inputImage);
+
+      speak(recognizedText.text); // Start reading the recognized text aloud.
+
+      await navigator.push(
+        MaterialPageRoute(
+          builder: (BuildContext context) =>
+              ResultScreen(text: recognizedText.text),
+        ),
+      );
+    } catch (e) {
+      scaffoldMessenger.showSnackBar(
+ const SnackBar(
+ content: Text('An error occurred when scanning text'),
+ ),
+ );
+ }
+ }
+}
\ No newline at end of file